Dataset columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (lengths 19 to 20)
metadata: dict
Mark a class as a Controller Resource.
def add_resource(self, cls):
    # check if the same controller was already used for another cls (Resource)
    if (
        hasattr(self, Controller.RESOURCE_CLASS_KEY)
        and getattr(self, Controller.RESOURCE_CLASS_KEY) != cls
    ):
        raise MultipleResourceException()
    # check if cls (Resource) was extended from another
    if hasattr(cls, Controller.RC_KEY):
        self.__get_parent_routes(cls.__router__)
    setattr(cls, Controller.RC_KEY, self.router)
    setattr(self, Controller.RESOURCE_CLASS_KEY, cls)
    cls.router = lambda: Controller.__parse_controller_router(cls)
    return cls
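Below is a stripped-down, runnable sketch of the registration pattern the snippet above implements: a controller object remembers the single resource class it marks and attaches a router() factory to the class. MiniController and UserResource are illustrative stand-ins, not the real Controller API.

# Illustrative stand-in, not the library's actual Controller class.
class MiniController:
    RESOURCE_CLASS_KEY = "_resource_cls"

    def __init__(self):
        self.router = object()  # stand-in for the controller's APIRouter

    def add_resource(self, cls):
        # refuse to bind the same controller to a second resource class
        bound = getattr(self, MiniController.RESOURCE_CLASS_KEY, cls)
        if bound is not cls:
            raise ValueError("controller already bound to another resource")
        setattr(self, MiniController.RESOURCE_CLASS_KEY, cls)
        cls.router = lambda: self.router  # router() factory, as in the snippet above
        return cls

controller = MiniController()

@controller.add_resource
class UserResource:
    pass

assert UserResource.router() is controller.router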
[ "def create_controller(self, resource):\n\n return type('%sController' % resource.__name__, (self.default_controller,), {\n 'configuration': self,\n 'resource': resource,\n 'version': (resource.version, 0),\n })", "def resource(self, resource):\n self._resource = resource", "def create_controller() -> Controller:\n _controller = Controller()\n return _controller", "def setController(self, controller):\n\n # controller must be a callable\n if ( not Tool.isCallable(controller)) :\n raise LogicException(\n 'The controller must be a callable ({0} given).'.format(\n repr(controller)\n ));\n\n\n self.__controller = controller;", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def __init__(self, resource=None):\r\n super(VaImage, self).__init__(resource)\r\n self._parent = None\r\n\r\n other_features = ['chassis','system','system-common']\r\n self.controller = Controller(resource, other_features)\r\n\r\n common = '_'.join(('', 'system-common'))\r\n if common in self.controller.__dict__.keys():\r\n self._parent = self.controller.__dict__.get(common)", "def _set_controller(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"controller must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__controller = t\n if 
hasattr(self, '_set'):\n self._set()", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def __call__(cls, *args, **kwargs):\n if args and isinstance(args[0], HttpRequest):\n instance = super(ResourceMetaclass, cls).__call__()\n return instance.__call__(*args, **kwargs)\n return super(ResourceMetaclass, cls).__call__(*args, **kwargs)", "def _create_controller(main_controller, action_controller_list):\n\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def name(self) -> str:\n return \"Controller\"", "def __init__(self):\n self._resourceManager = visa.ResourceManager()", "def register(Resource, route):\n _api.add_resource(Resource, route)", "def pre_routing_instance_create(self, resource_dict):\n pass", "def target_resource(self, target_resource):\n self._target_resource = target_resource", "def set_resource_type(self, klass):\n self.resource_type = klass\n self.schema = loaders.load_schema_raw(self.resource_type)", "def customise_hrm_human_resource_controller(**attr):\n\n # Custom PreP\n standard_prep = s3.prep\n def custom_prep(r):\n # Call standard prep\n if callable(standard_prep):\n result = standard_prep(r)\n if not result:\n return False\n\n if r.method == \"datalist\":\n customise_hrm_human_resource_fields()\n current.s3db.configure(\"hrm_human_resource\",\n # Don't include a Create form in 'More' popups\n listadd = False,\n list_layout = render_contacts,\n )\n\n return True\n s3.prep = custom_prep\n\n return attr", "def control(cls, car):\n\t\t# I guess for now we also update the class's controller from here, but I'm not sure if this is correct yet.\n\t\tcls.get_output(car)\n\t\tfor c in cls.controls:\n\t\t\tsetattr(car.controller, c, getattr(cls.controller, c))", "def controller_name(self, controller_name):\n\n self._controller_name = controller_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the FastAPI router. Use it as you would the original one.
def route(self) -> APIRouter:
    return self.router
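As an illustration of how the returned router is consumed, here is a self-contained sketch that mounts it on a FastAPI app. PingController is an invented example class; only its route() method mirrors the snippet above.

from fastapi import APIRouter, FastAPI

class PingController:
    def __init__(self):
        self.router = APIRouter()
        # register a trivial GET endpoint on the controller's own router
        self.router.add_api_route("/ping", self.ping, methods=["GET"])

    def ping(self):
        return {"ok": True}

    def route(self) -> APIRouter:
        # same idea as the snippet above: hand back the underlying router
        return self.router

app = FastAPI()
app.include_router(PingController().route())  # mount it like any other APIRouter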
[ "def router(self):\n return self.pluginpod.router", "def get_router():\r\n router = getattr(settings, 'RAPIDSMS_ROUTER',\r\n 'rapidsms.router.blocking.BlockingRouter')\r\n if isinstance(router, basestring):\r\n try:\r\n router = import_class(router)()\r\n except ImportError as e:\r\n raise ImproperlyConfigured(e)\r\n return router", "def create_router(self, environment, *, router=None):\n\n if router is None:\n router = self.router\n\n return utils.objects.ensure_instance(router, environment=environment)", "def get_fdcanusb_router():\n\n global _global_router\n\n if _global_router:\n return _global_router\n\n _global_router = fdcanusb.Fdcanusb()\n return _global_router", "def create_router(self, context, router):\n\n return self._create_resource('router', context, router)", "def router_with(impl):\n def _router(*routes):\n routes = freeze(list(_prioritize_routes(_conform_routes(routes))))\n return Interceptor(\n name='router',\n enter=_enter_route(impl(routes), routes))\n return _router", "def RemoteRouter(services):\n return PublicController(services)", "def taskrouter(self):\n if self._taskrouter is None:\n from twilio.rest.taskrouter import Taskrouter\n self._taskrouter = Taskrouter(self)\n return self._taskrouter", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def getRoutes(self):\n pass", "def _forObject(self, obj):\n router = type(self)()\n router._routes = list(self._routes)\n router._self = obj\n return router", "def create_app() -> FastAPI:\n global __app\n\n if not __app:\n __app = FastAPI(title=\"BLOGG API\",\n description=\"An api backend for a blogging app.\", version=\"0.1.0\")\n\n from src.views.index import router as pagesRouter\n from src.api.blogs_api import blog_router\n from src.api.users_api import user_router\n from src.api.engagements_api import engagement_router\n\n __app.include_router(router=pagesRouter)\n __app.include_router(router=user_router)\n __app.include_router(router=blog_router)\n __app.include_router(router=engagement_router)\n\n __app.add_middleware(\n middleware_class=CORSMiddleware,\n allow_origins=origins,\n allow_origin_regex=\"http://localhost:*\",\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n return __app", "def autodiscover_api_routers():\n 
# TODO: Support multiple API versions by allowing \"router\" to contain a dictionary\n api_router = SharedRootDefaultRouter()\n\n for app_config in apps.get_app_configs():\n app = app_config.name\n if app.startswith('django.'):\n # skip Django core apps to avoid false warnings\n continue\n\n api_module = _try_import_api(app)\n router = _try_get_router(app, api_module)\n if router:\n # if router is not None it is good\n api_router.register_router(router)\n logger.debug('registered \"%s\"', app_config.name)\n\n return api_router", "def routes(app):\n Router(app, 'index', '/', IndexController, 'index', methods=['GET'])", "def get_router(self, containers):\n for container in containers:\n if container.name == 'router':\n return container\n return None", "def router(self, route):\n try:\n return self._router(route)\n except Exception:\n print(\"***** Error inside Fake_Server.router *****\")\n info = sys.exc_info()\n print(traceback.format_exc())\n if self.usepdb:\n pdb.post_mortem(info[2])\n route.abort()", "def useRouter(self, predicate, router):\n return self.mountRouter(predicate, router)", "def _try_get_router(app, api_module):\n if not api_module:\n return\n\n router = getattr(api_module, 'router', None)\n\n if not router:\n logger.warn('%s contains an api module but it is missing a \"router\" variable.', app)\n return None\n\n if not isinstance(router, BaseRouter):\n logger.warn('%s contains an api.router, but the router is not derived from BaseRouter', app)\n return None\n\n return router", "def get_routes():\n global _routes\n if _routes is not None:\n return _routes\n \n _routes = jsonroutes.JsonRoutes(os.path.join(\"files\", \"routes\", \"**\", \"*.json\"), os.path.join(\"files\", \"scripts\", \"**\", \"*routes.json\"), variables=get_variables())\n return _routes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if two shards overlap.
def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
    # For each dim of each shard, check if one shard resides on the other
    # end of second shard with respect to that dim. As an example for a 2D
    # shard, we would check if one shard is above or on the left of the
    # other shard.
    ndims = len(shard1.shard_offsets)
    for i in range(ndims):
        if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:
            return False
        if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:
            return False
    return True
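A small worked example of the per-dimension test, using a plain dataclass stand-in that exposes only the two fields the check reads; the real ShardMetadata type is not needed for the arithmetic.

from dataclasses import dataclass
from typing import List

@dataclass
class FakeShard:  # stand-in exposing just the fields the check reads
    shard_offsets: List[int]
    shard_lengths: List[int]

def overlaps(a, b):  # same per-dimension test as the snippet above
    for i in range(len(a.shard_offsets)):
        if a.shard_offsets[i] >= b.shard_offsets[i] + b.shard_lengths[i]:
            return False
        if b.shard_offsets[i] >= a.shard_offsets[i] + a.shard_lengths[i]:
            return False
    return True

# Two 2x4 row shards of a 4x4 tensor: disjoint, so no overlap.
top = FakeShard(shard_offsets=[0, 0], shard_lengths=[2, 4])
bottom = FakeShard(shard_offsets=[2, 0], shard_lengths=[2, 4])
print(overlaps(top, bottom))                     # False: the row blocks are disjoint
print(overlaps(top, FakeShard([1, 0], [2, 4])))  # True: both cover row 1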
[ "def _check_box_overlap(\n box0: ChunkStorageMetadata, box1: ChunkStorageMetadata\n) -> bool:\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(box0.offsets)\n for i in range(ndims):\n if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]:\n return False\n if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]:\n return False\n\n return True", "def can_overlap(self):\n return False", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):\n # TODO: evaluate optimizing this if needed.\n for i in range(len(shards)):\n for j in range(i + 1, len(shards)):\n if _check_shard_metadata_pair_overlap(shards[i], shards[j]):\n raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')", "def overlaps(self, other: \"Space\") -> bool:\n # Loop over all of this space's cells\n for cell in self.cells:\n if cell.coordinates in other.cell_coordinates:\n return True\n\n # No overlap\n return False", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def is_overlapped(self, another: 'Range') -> bool:\n a, b = self, another\n if a.start > b.start:\n a, b = b, a\n\n return a.end > b.start", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def overlap(start1, end1, start2, end2):\r\n return end1 >= start2 and end2 >= start1", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def Overlap( entry1, entry2, min_overlap = 0 ):\n\n return (entry1.contig == entry2.contig and entry1.strand == entry2.strand and \\\n min(entry1.end, entry2.end) - max(entry1.start, entry2.start) > min_overlap)", "def no_overlap(locA1, locA2, locB1, locB2):\n if locA1 < locB1 and locA2 < locB1:\n return True\n elif locA1 > locB2 and locA2 > locB2:\n return True\n else:\n return False", "def is_overlapping(self) -> bool:\n # GH 23309\n return self._engine.is_overlapping", "def isOverlap(self, subR):\n if self.west > subR.east or self.east < subR.west or self.north < subR.south or self.south > subR.north:\n return False\n else:\n return True", "def check_overlapping(start_a, end_a, start_b, end_b):\n if start_a <= end_b and end_a >= start_b:\n return True\n return False", "def overlapping(time_1, time_2):\n\n if (time_1[0] <= time_2[0] <= time_1[1]) or (time_2[0] <= time_1[0] <= time_2[1]):\n return True\n\n return False", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def _check_overlap (self, first, last):\n rowcount = self._table.nrows\n if rowcount == 0: return\n\n first_row = self._get_row(0)\n last_row = self._get_row(-1)\n\n if first < last_row[\"time\"] and last > first_row[\"time\"]:\n #find overlapping region\n for row in 
self._table.itersorted(self._table.cols.time):\n if row[\"time\"] > first:\n if not self._overwrite:\n if last > row[\"time\"]:\n common.log.info(\"%s > %s\" % (common.Time.time(last), common.Time.time(row[\"time\"])))\n raise exception.OverlapException()\n else:\n break\n else:\n pass #TODO: copy to new table, filter overlap" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures none of the shards overlap with each other.
def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):
    # TODO: evaluate optimizing this if needed.
    for i in range(len(shards)):
        for j in range(i + 1, len(shards)):
            if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
                raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
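Reusing the FakeShard/overlaps stand-ins sketched after the previous row, the same pairwise loop rejects a list as soon as any two shards overlap:

# Assumes the FakeShard / overlaps stand-ins from the sketch above.
shards = [FakeShard([0, 0], [2, 4]), FakeShard([2, 0], [2, 4]), FakeShard([1, 0], [2, 4])]
try:
    for i in range(len(shards)):
        for j in range(i + 1, len(shards)):
            if overlaps(shards[i], shards[j]):
                raise ValueError(f"Shards {shards[i]} and {shards[j]} overlap")
except ValueError as err:
    print(err)  # the third shard overlaps both row shards, so validation fails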
[ "def can_overlap(self):\n return False", "def validate_shards(shard_ranges_by_partition_name):\n shards_seen = set()\n previous_range = None\n for group, shard_range, in sorted(list(shard_ranges_by_partition_name.items()),\n key=lambda x: x[1]):\n if not previous_range:\n assert shard_range[0] == 0, 'Shard numbering must start at 0'\n else:\n assert previous_range[1] + 1 == shard_range[0], \\\n 'Shards must be numbered consecutively: {} -> {}'.format(\n previous_range[1], shard_range[0])\n\n shards_seen |= set(range(shard_range[0], shard_range[1] + 1))\n previous_range = shard_range\n\n num_shards = len(shards_seen)\n\n assert _is_power_of_2(num_shards), \\\n 'Total number of shards must be a power of 2: {}'.format(num_shards)", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def _check_box_overlap(\n box0: ChunkStorageMetadata, box1: ChunkStorageMetadata\n) -> bool:\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(box0.offsets)\n for i in range(ndims):\n if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]:\n return False\n if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]:\n return False\n\n return True", "def test_simple_overlap_failure_no_overlap(self):\n\n seq_a = DNASequence('AAAATTTT')\n seq_b = DNASequence('GGGGTTTT')\n expected_num_overlap = None\n\n # Check front overlapping\n does_front_overlap, num_front_overlap = seq_a.this_front_overlaps_other(seq_b)\n assert not does_front_overlap, '{0} should NOT front overlap {1}'.format(seq_a, seq_b)\n assert num_front_overlap == expected_num_overlap, \\\n '{0} should front overlap {1} by {2}. Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_front_overlap)\n\n # Check back overlapping\n does_back_overlap, num_back_overlap = seq_b.this_back_overlaps_other(seq_a)\n assert not does_back_overlap, '{0} should NOT back overlap {1}'.format(seq_a, seq_b)\n assert num_back_overlap == expected_num_overlap, \\\n '{0} should back overlap {1} by {2}. Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_back_overlap)", "def test_simple_overlap_failure_small_overlap(self):\n\n seq_a = DNASequence('AAAATTTA')\n seq_b = DNASequence('TAGGTTTT')\n expected_num_overlap = None\n\n # Check front overlapping\n does_front_overlap, num_front_overlap = seq_a.this_front_overlaps_other(seq_b)\n assert not does_front_overlap, '{0} should NOT front overlap {1}'.format(seq_a, seq_b)\n assert num_front_overlap == expected_num_overlap, \\\n '{0} should front overlap {1} by {2}. 
Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_front_overlap)\n\n # Check back overlapping\n does_back_overlap, num_back_overlap = seq_b.this_back_overlaps_other(seq_a)\n assert not does_back_overlap, '{0} should NOT back overlap {1}'.format(seq_a, seq_b)\n assert num_back_overlap == expected_num_overlap, \\\n '{0} should back overlap {1} by {2}. Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_back_overlap)", "def load_overlapping_shards():\n while not event_heap and shards:\n # Try to pull events from unread shards.\n load_next_shards(shards[0].cmp_id)\n\n if event_heap and shards:\n # Pull events from all shards that overlap with the next event to be\n # yielded.\n load_next_shards(event_heap[0].id)\n elif not iterators:\n # No events in the heap and no active iterators? We're done!\n return\n\n shards_with_events = set(event.stream_shard for event in event_heap)\n for shard in iterators.keys():\n if shard in shards_with_events:\n continue\n try:\n it = iterators[shard]\n event = it.next()\n heapq.heappush(event_heap, event)\n except StopIteration:\n del iterators[shard]", "def _check_overlap (self):\n rowcount = self._table.nrows\n if rowcount == 0: return\n\n first_row = self._get_row(0)\n last_row = self._get_row(-1)\n \n if self._first < last_row[\"time\"] and self._last > first_row[\"time\"]:\n #find overlapping region\n for row in self._table.itersorted(self._table.cols.time):\n if row[\"time\"] > self._first:\n if not self._overwrite:\n if self._last > row[\"time\"]:\n common.log.info(\"%s > %s\" % (common.Time.time(self._last), common.Time.time(row[\"time\"])))\n raise exception.OverlapException()\n else:\n break\n else:\n pass #TODO: copy to new table, filter overlap", "def combineShards(shards,locX,locY,outSize,overlapRule):\n \n locX = locX.astype(int)\n locY = locY.astype(int)\n \n sz = shards.size()\n \n startLocX = np.min(locX)\n startLocY = np.min(locY)\n \n endLocX = np.max(locX) + sz[2] + np.abs(startLocX)\n endLocY = np.max(locY) + sz[3] + np.abs(startLocY)\n \n if startLocX<0:\n locX += np.abs(startLocX)\n if startLocY<0:\n locY += np.abs(startLocY)\n \n \n # prepare output tensor as well as counting grid\n out = torch.Tensor(sz[1],int(endLocX),int(endLocY))\n out[:] = 0\n count = torch.Tensor(1,int(endLocX),int(endLocY))\n count[:] = 0\n \n # iterate over shards and restore\n for i in range(0,sz[0]):\n posX = int(i / locX.shape[1])\n posY = int(i % locX.shape[1])\n \n coordsX = locX[posX,posY]\n coordsY = locY[posX,posY]\n \n shard = shards[i,:,:,:]\n \n if overlapRule=='sum':\n out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]] += shard\n elif overlapRule=='max':\n out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]] = torch.max(out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]],shard)\n elif overlapRule=='min':\n out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]] = torch.min(out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]],shard)\n else:\n out[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]] += shard\n count[:,coordsX:coordsX+sz[2],coordsY:coordsY+sz[3]] += 1\n\n \n \n # normalise according to specified flag\n if overlapRule=='average' or overlapRule=='avg':\n out /= count.expand_as(out)\n \n \n # crop if necessary\n if outSize is not None:\n sz_out = out.size()\n if sz_out[1]!=outSize[0] or sz_out[2]!=outSize[1]:\n overhangX = (sz_out[1] - outSize[0])/2\n overhangY = (sz_out[2] - outSize[1])/2\n \n out = out[:,overhangX:-overhangX,overhangY:-overhangY]\n \n return out", "def 
test_connecting_span_with_overlap(self):\n s1 = Span(10, 30, 100, True)\n s2 = Span(20, 50, 100, True)\n assert not s1.connecting_span(s2)", "def _assert_no_scope_overlap(children) -> None: # noqa: ANN001\n for c0, c1 in itertools.combinations(children, 2):\n if set(c0.scope) & set(c1.scope):\n raise OverlappingScopesException(\n \"Children {} and {} have overlapping scopes\".format(c0, c1)\n )", "def _CheckOverlap(self):\n required = filter(self._IsRequired, self.entries)\n entries = sorted(required, key=lambda e: e.offset)\n all_entries = len(self.entries) == len(required)\n for e1, e2 in zip(entries, entries[1:]):\n # Allow overlap between \"pure\" fmap areas, but not any of its subclasses\n # Here we exploit the fact that Entry is a new-style class\n if type(e1) is EntryFmapArea or type(e2) is EntryFmapArea:\n continue\n\n overlap = e1.GetOverlap(e2)\n if overlap > 0:\n raise ValueError('Flash map entries overlap by %d bytes: '\n '%s: %08x-%08x, %s: %08x-%08x' %\n (overlap, e1.label, e1.offset, e1.offset + e1.size,\n e2.label, e2.offset, e2.offset + e2.size))\n elif overlap is not 0:\n self._out.Warning('Warning: Flash map has a gap of %d bytes: '\n '%s: %08x-%08x, %s: %08x-%08x' %\n (-overlap, e1.label, e1.offset, e1.offset + e1.size,\n e2.label, e2.offset, e2.offset + e2.size))\n\n return all_entries", "def _check_overlap (self, first, last):\n rowcount = self._table.nrows\n if rowcount == 0: return\n\n first_row = self._get_row(0)\n last_row = self._get_row(-1)\n\n if first < last_row[\"time\"] and last > first_row[\"time\"]:\n #find overlapping region\n for row in self._table.itersorted(self._table.cols.time):\n if row[\"time\"] > first:\n if not self._overwrite:\n if last > row[\"time\"]:\n common.log.info(\"%s > %s\" % (common.Time.time(last), common.Time.time(row[\"time\"])))\n raise exception.OverlapException()\n else:\n break\n else:\n pass #TODO: copy to new table, filter overlap", "def test_simple_overlap_success(self):\n\n seq_a = DNASequence('AAATGACCA')\n seq_b = DNASequence('TGACCATTT')\n expected_num_overlap = 6\n\n # Check front overlapping\n does_front_overlap, num_front_overlap = seq_a.this_front_overlaps_other(seq_b)\n assert does_front_overlap, '{0} should front overlap {1}'.format(seq_a, seq_b)\n assert num_front_overlap == expected_num_overlap, \\\n '{0} should front overlap {1} by {2}. Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_front_overlap)\n\n # Check back overlapping\n does_back_overlap, num_back_overlap = seq_b.this_back_overlaps_other(seq_a)\n assert does_back_overlap, '{0} should back overlap {1}'.format(seq_a, seq_b)\n assert num_back_overlap == expected_num_overlap, \\\n '{0} should back overlap {1} by {2}. 
Instead it overlaps by {3}'.format(\n seq_a, seq_b, expected_num_overlap, num_back_overlap)", "def no_overlap(locA1, locA2, locB1, locB2):\n if locA1 < locB1 and locA2 < locB1:\n return True\n elif locA1 > locB2 and locA2 > locB2:\n return True\n else:\n return False", "def warn_overlap(self):\n for section_a, section_b in itertools.combinations(self.items.keys(), 2):\n items_overlap = sorted(set(self[section_a]) & set(self[section_b]))\n a_primary, _, a_secondary = section_a.partition(':')\n b_primary, _, b_secondary = section_a.partition(':')\n primary_section_overlaps = a_primary == b_primary\n secondary_sections_separate = primary_section_overlaps and '' not in [a_secondary, b_secondary]\n if items_overlap and not secondary_sections_separate:\n print('\\nWarning: these entries are configured to both {0} and {1}:'.format(section_a, section_b))\n print(items_overlap)", "def test_many_shards(self):\n from extractor.sharder import _generate_shard_ranges\n\n today = datetime.datetime.today()\n\n shard_range = (today - datetime.timedelta(days=1), today)\n\n shards = _generate_shard_ranges(24, 'datetime', shard_range)\n\n expected_shards = [\n (today - datetime.timedelta(hours=24 - i),\n today - datetime.timedelta(hours=23 - i))\n for i in xrange(24)]\n\n self.assertEqual(expected_shards, shards)", "def overlap_conflict(out, *inputs):\n from . import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False", "def check_loc_overlaps(init_locations, kernels_size):\r\n for i in range(len(init_locations)):\r\n for j in range(i + 1, len(init_locations)):\r\n if abs(init_locations[i][0] - init_locations[j][0]) < kernels_size and abs(\r\n init_locations[i][1] - init_locations[j][1]) < kernels_size:\r\n raise ValueError(\"kernel-{0} and kernel-{1} are overlapping.\".format(i, j))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the shards_metadata is compatible with the provided tensor dims.
def check_tensor(shards_metadata, tensor_dims) -> None:
    # If the tensor's volume matches the total volume of all shards and
    # all shard boundaries are within tensor dims, we have a compatible
    # sharding spec for this tensor. Note that we have already verified
    # we don't have overlapping shards.
    tensor_rank = len(tensor_dims)
    shards_rank = len(shards_metadata[0].shard_offsets)
    if tensor_rank != shards_rank:
        raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')

    total_shard_volume = 0
    for shard in shards_metadata:
        shard_volume = 1
        for i, shard_length in enumerate(shard.shard_lengths):
            shard_volume *= shard_length
            if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]:
                raise ValueError(
                    f'Shard offset {shard.shard_offsets[i]} and length '
                    f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
        total_shard_volume += shard_volume

    tensor_volume = 1
    for size in tensor_dims:
        tensor_volume *= size

    if total_shard_volume != tensor_volume:
        # TODO: Can we improve this error message to point out the gaps?
        raise ValueError(
            f'Total volume of shards: {total_shard_volume} '
            f'does not match tensor volume: {tensor_volume}, in other words '
            f'all the individual shards do not cover the entire tensor')
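To make the volume rule concrete: a 4 x 6 tensor split into two 4 x 3 column shards has shard volumes 12 + 12 = 24, equal to the tensor volume 4 * 6 = 24, and every shard stays inside the tensor bounds, so the check passes. A runnable sketch with plain (offsets, lengths) tuples instead of the real ShardMetadata objects:

# Stand-in shards: (offsets, lengths) per dimension, covering a 4x6 tensor.
tensor_dims = [4, 6]
shards = [([0, 0], [4, 3]), ([0, 3], [4, 3])]

total = 0
for offsets, lengths in shards:
    vol = 1
    for i, length in enumerate(lengths):
        vol *= length
        assert offsets[i] + length <= tensor_dims[i], "shard exceeds tensor dim"
    total += vol

tensor_volume = tensor_dims[0] * tensor_dims[1]
assert total == tensor_volume  # 24 == 24: the shards exactly cover the tensor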
[ "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def _check_input_dimensions(self, obs_dim):\n if obs_dim == self.length_raw_array:\n return True\n else:\n raise ObservationArrayExpectedDimFail()", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def check_dims(dims):\n dims_types = [list, tuple]\n dim_spec = []\n\n if type(dims) not in dims_types:\n raise TypeError(\"The dimensions must be a list or tuple.\")\n\n for dim in dims:\n if type(dim) not in dims_types:\n raise TypeError(\n \"The dimension {} must be a list or tuple.\" \"\".format(dim)\n )\n\n for entry in dim:\n if type(entry) not in dtypes.all:\n raise TypeError(\n \"The entry {} in dimension {} cannot be \"\n \"used with OptTask. A list of acceptable \"\n \"datatypes is {}\".format(entry, dim, dtypes.all)\n )\n for dset in [dtypes.ints, dtypes.floats, dtypes.others]:\n if type(entry) not in dset and type(dim[0]) in dset:\n raise TypeError(\n \"The dimension {} contains heterogeneous\"\n \" types: {} and {}\".format(dim, type(dim[0]), type(entry))\n )\n if isinstance(dim, list):\n if type(dim[0]) in dtypes.ints:\n dim_spec.append(\"int_set\")\n elif type(dim[0]) in dtypes.floats:\n dim_spec.append(\"float_set\")\n elif type(dim[0]) in dtypes.others:\n dim_spec.append(\"categorical {}\".format(len(dim)))\n elif isinstance(dim, tuple):\n if type(dim[0]) in dtypes.ints:\n dim_spec.append(\"int_range\")\n elif type(dim[0]) in dtypes.floats:\n dim_spec.append(\"float_range\")\n elif type(dim[0]) in dtypes.others:\n dim_spec.append(\"categorical {}\".format(len(dim)))\n return dim_spec", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def _validate(self, tensors):\n if len(tensors) != len(self._tensors):\n raise ValueError('Expected tensors to have %d elements. Received %d '\n 'instead.' % (len(self._tensors), len(tensors)))\n if self._tensors.keys() != tensors.keys():\n raise ValueError('The keys of tensors should be the always the same.'\n 'Received %s instead %s.' %\n (tensors.keys(), self._tensors.keys()))\n for name, tensor in tensors.items():\n if tensor.get_shape().as_list() != self._tensors[\n name].get_shape().as_list()[1:]:\n raise ValueError('Tensor %s has incorrect shape.' % name)\n if not tensor.dtype.is_compatible_with(self._tensors[name].dtype):\n raise ValueError(\n 'Tensor %s has incorrect data type. 
Expected %s, received %s' %\n (name, self._tensors[name].read_value().dtype, tensor.dtype))", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def _check_dim_array(array, ndim):\n # enlist the number of expected dimensions\n if isinstance(ndim, int):\n ndim = [ndim]\n\n # check the number of dimensions of the array\n if array.ndim not in ndim:\n raise ValueError(\"Array can't have {0} dimension(s). Expected \"\n \"dimensions are: {1}.\".format(array.ndim, ndim))", "def _check_dims(self, state, controls):\n try:\n iter(state)\n iter(controls)\n except TypeError:\n raise TypeError(\n \"Please use containers for controls and states: value --> [value].\"\n )\n else:\n assert self.controls_dims == len(\n controls\n ), f\"This model expects {self.controls_dims} controls!\"\n assert self.states_dims == len(\n state\n ), f\"This model expects {self.states_dims} controls!\"", "def test_dimensions(nc, requirements):\n retVal = 0\n\n for req_dimension in requirements['dimensions']:\n if req_dimension not in nc.dimensions:\n print \"Dimension Missing: %s\" % (req_dimension)\n retVal += 1\n\n return retVal", "def assert_only_dim_reduced(dim, actual, obs):\n if dim is None:\n dim = list(obs.dims)\n elif not isinstance(dim, list):\n dim = [dim]\n for d in dim:\n assert d not in actual.dims\n for d in obs.dims:\n if d not in dim:\n assert d in actual.dims", "def __validate_dimensions(self, v):\n return \\\n is_positive_float(v) and self.__are_parameters_consistent()", "def _check_dimension_acls(request):\n dim_acls = config.settings().dimension_acls\n if not dim_acls or not dim_acls.entry:\n return # not configured, this is fine\n\n ident = request.authenticated\n dims = request.properties.dimensions\n assert 'id' in dims or 'pool' in dims, dims # see _validate_dimensions\n assert ident is not None # see task_request.init_new_request\n\n # Forbid targeting individual bots for non-admins, but allow using 'id' if\n # 'pool' is used as well (so whoever can posts tasks to 'pool', can target an\n # individual bot in that pool).\n if 'id' in dims and 'pool' not in dims:\n if not acl.is_admin():\n raise auth.AuthorizationError(\n 'Only Swarming administrators can post tasks with \"id\" dimension '\n 'without specifying a \"pool\" dimension.')\n\n for k, v in sorted(dims.iteritems()):\n if not _can_use_dimension(dim_acls, ident, k, v):\n raise auth.AuthorizationError(\n 'User %s is not allowed to schedule tasks with dimension \"%s:%s\"' %\n (ident.to_bytes(), k, v))", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a 
dimension on X'", "def check_sizes(input_tensor, input_name, expected):\n condition = [input_tensor.ndimension() == len(expected)]\n for i, size in enumerate(expected):\n if size.isdigit():\n condition.append(input_tensor.size(i) == int(size))\n assert(all(condition)),\\\n \"wrong size for {}, expected {}, got {}\".format(input_name, 'x'.join(expected), list(input_tensor.size()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process newly submitted GeoPost entry...

PROCEDURE
1) Get data from POST body
2) Validate form
3) Upload photo to bucket
4) Make WFS transaction with GeoServer
def post(self, request):
    # GET REQUEST DATA
    fid = request.POST.get('fid', False)
    uuid = request.POST.get('uuid', False)
    title_text = request.POST.get('title', False)
    body = request.POST.get('body', False)
    photo = request.FILES.get('photo', False)  # FOR STORAGE
    wfsxml = request.POST.get('wfsxml', False)  # FOR GEOSERVER
    data = {
        'uuid': uuid,
        'title_text': title_text,
        'body': body,
        'wfsxml': wfsxml
    }
    # VALIDATE FORM
    form = GeoPostForm(data, request.FILES)
    logger.info("\ninstantiate Geopost form\n")
    # IF FORM VALIDATION ERROR
    if not form.is_valid():
        return server_error(request.body)
        #context = self.getContext(form)
        #return render(request, 'geopost/entry.html', context)
    else:
        pass
    # GET CLEAN VALUES
    uuid = form.cleaned_data['uuid']
    wfsxml = form.cleaned_data['wfsxml']
    # UPLOAD PHOTO TO BUCKET
    # if editing existing entry, first delete existing photo
    if fid:
        delete_from_bucket(uuid, self.imageBucket)
    else:
        pass
    photo.open('rb')
    error = upload_to_bucket(
        photo, self.imageBucket, photo.content_type, uuid)
    photo.close()
    # IF ERROR UPLOADING IMAGE
    if error:
        return server_error(error)
    else:
        pass
    # MAKE GEOSERVER WFS TRANSACTION
    error = post_to_geoserver(wfsxml, self.wfsURL)
    # ALL GOOD
    if not error:
        return HttpResponseRedirect(reverse('geopost_home'))
    # IF WFS TRANSACTION ERROR
    else:
        delete_from_bucket(uuid, self.imageBucket)
        return server_error(error)
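upload_to_bucket and post_to_geoserver are project helpers that are not shown in this row. Purely as an illustration of the WFS step, here is a hedged sketch of what posting a WFS transaction body with requests could look like; the endpoint, headers, and error convention are assumptions, not the project's actual helper.

import requests

def post_to_geoserver_sketch(wfsxml: str, wfs_url: str):
    """Hypothetical stand-in for the project's post_to_geoserver helper."""
    resp = requests.post(
        wfs_url,                                  # e.g. .../geoserver/wfs (assumed)
        data=wfsxml.encode("utf-8"),
        headers={"Content-Type": "text/xml"},
        timeout=30,
    )
    # Return an error message on failure and None on success, mirroring how
    # the view above treats the helper's return value.
    if resp.status_code != 200:
        return f"WFS transaction failed: HTTP {resp.status_code}"
    return None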
[ "def _preprocess_rack_form(postdata):\n\n if postdata[u'geocoded'] != u'1':\n if postdata['address'].strip():\n results = _geocode(postdata['address'])\n # XXX handle multiple (or zero) results.\n try:\n lat, lon = results[0][1]\n except IndexError:\n # no results. XXX what to do here?\n postdata[u'location'] = u''\n else:\n postdata[u'location'] = str(Point(lon, lat, srid=SRID))\n \n # Handle a registered user submitting without logging in...\n # eg. via email.\n user = postdata.get('user', '').strip()\n email = postdata.get('email', '').strip()\n if email and not user:\n users = User.objects.filter(email=email).all()\n if len(users) == 1:\n postdata['user'] = users[0].username", "def do_POST(self):\n global pages, devices, settings\n try:\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n #if(self.path != '/simple/updateGPSCoordinates'):\n #print postvars\n #print self.path\n #now call the function that is meant to process this request\n if(self.path == '/simple/selectedHousehold'):\n #print 'need to get all cows in household #%s ' % postvars['household'][0]\n output = pages[postvars['page'][0]].selectedHousehold(postvars['household'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/selectedSite'):\n #print 'need to get all the households from the site #%s ' % postvars['sites'][0]\n output = pages[postvars['page'][0]].selectedSite(postvars['sites'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/nextAnimal'):\n #print 'we have finalized saving samples for one animal, now we need to go to the next animal'\n output = pages[postvars['page'][0]].nextAnimal(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/sampleCow'):\n #print 'we sampling the cow'\n #we have the cow that we want to sample...now proceed with the sampling\n output = pages[postvars['page'][0]].collectSample(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/saveSample'):\n #print 'we saving a new sample'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].saveSample(postvars, devices['gps'], settings['barcode_use'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateGPSCoordinates'):\n #we want to get the current GPS position\n output = pages[postvars['page'][0]].curPosition(devices['gps']) #for the sake of consistence, we just using the passed 'page' variable\n self.wfile.write(output)\n elif(self.path == '/simple/deleteSample'):\n #print 'we need to delete the sample %s ' % postvars['sample'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteSample(postvars['sample'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/deleteAnimal'):\n #print postvars\n #print 'we need to delete the anial %s ' % postvars['curAnimalRead'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteAnimal(postvars['curAnimalRead'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/showAllSites'):\n #print postvars\n #print 'we either to show all sites or just the households within a certain radius'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].showSites(postvars, devices['gps'])\n self.wfile.write(output)\n 
elif(self.path == '/simple/refreshSampler'):\n #print 'I really dont know what to do here, so we shall evaluate it a case on case basis'\n output = pages[postvars['page'][0]].refreshSampler(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateHouseholds'):\n #print 'The radius of interest has changed...lets update the households'\n output = pages[postvars['page'][0]].updateSites(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/admin'):\n #print 'admin page'\n \n if ctype == 'multipart/form-data':\n self.send_response(301)\n form = cgi.parse_multipart(self.rfile, pdict)\n #print form\n pages[form['page'][0]].parse_form(form, info, devices)\n self.send_header('Location', 'http://localhost:%s/%s' % (settings['port'], form['page'][0]))\n self.end_headers()\n except IOError:\n self.send_error(501, 'Unsupported Method')", "def submit_fishfry():\n logging.info(\"\\nsubmit ----------\")\n # pdb.set_trace()\n form = FishFryForm()\n # logging.info(json.dumps(request.form, indent=2))\n # ffid = form['ffid']\n if form.validate_on_submit():\n\n # ---------------------------------------------------------------------\n # get the form data and plug it into the geojson.\n # some of that data requires post-processing; that is done here.\n\n # feature_dict = postprocess_submit(request.form.to_dict())\n\n properties = {\n \"venue_name\": form.venue_name.data,\n \"venue_address\": form.venue_address.data,\n \"venue_type\": form.venue_type.data,\n \"venue_notes\": form.venue_notes.data,\n \"website\": form.website.data,\n \"email\": form.email.data,\n \"phone\": form.phone.data,\n \"etc\": form.etc.data,\n \"handicap\": postbool(form.handicap.data),\n \"alcohol\": postbool(form.alcohol.data),\n \"homemade_pierogies\": postbool(form.homemade_pierogies.data),\n \"lunch\": postbool(form.lunch.data),\n \"take_out\": postbool(form.take_out.data),\n \"validated\": form.validated.data,\n \"publish\": form.publish.data,\n \"menu\": {\n \"text\": form.menu_txt.data,\n \"url\": form.menu_url.data\n },\n \"events\": postprocess_events(form.events.data)\n }\n geometry = {\n \"type\": \"Point\",\n \"coordinates\": [form.lng.data, form.lat.data]\n }\n\n feature = {\n \"type\": \"Feature\",\n \"properties\": properties,\n \"geometry\": geometry\n }\n\n logging.info(json.dumps(feature, indent=2))\n\n # OPTOINAL: validate with Marshmallow here\n # (WTForms is also providing validation)\n # try:\n # result = Feature().load(feature)\n # except ValidationError as err:\n # logging.warning(err.messages)\n # logging.warning(err.data)\n\n # ---------------------------------------------------------------------\n # if there is an id already provided by the form, then this is an\n # existing record, and we're doing an update.\n ffid = form.ffid.data\n if ffid and ffid != \"None\":\n logging.info(\"This is an existing record ({0})\".format(ffid))\n onefry = update_one_fishfry(\n ffid,\n properties,\n geometry\n )\n logging.info(json.dumps(onefry, indent=2))\n\n flash('Fish Fry updated! ({0})'.format(ffid), \"info\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n\n # ----------------------------------------------------------------------\n # Otherwise this is a new record. 
An FFID will be assigned\n # closer to the metal.\n else:\n logging.info(\"This is a new record\")\n\n # submit to the db\n onefry = make_one_fishfry(\n properties=properties,\n geometry=geometry\n )\n if 'id' in onefry.keys():\n ffid = onefry['id']\n # once the record create is submitted, reload this page with the data.\n flash('Fish Fry added! ({0})'.format(ffid), \"success\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n else:\n flash(\n \"There was an 500-level error when adding data to the database.\", \"danger\")\n return render_template(\n 'pages/fishfryform.html',\n form=form,\n )\n # flash(\"Invalid data:\\n\"{0}.format(\"\\n\".join([error for error in form.errors])))\n # flash(\"You can only submit data through the form via POST request.<br>Consider using the API if you want to work with data programmatically.\", \"info\")\n # return redirect(url_for('load_fishfry', ffid=ffid))\n return render_template(\n 'pages/fishfryform.html',\n form=form\n )", "def parse_post(request):\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post", "def process_submission(self, submission):\n pass", "def _upload_post(self):\n\n # Fetch the user's identifier from the request, which\n # contains the oauth2 creds.\n try:\n token = flask.request.headers['X-IDTOKEN']\n except Exception as e:\n return flask.Response('Missing credential token header', 405)\n\n try:\n idinfo = client.verify_id_token(token, sk.GOOGLE_OAUTH2_CLIENT_ID)\n if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:\n raise crypt.AppIdentityError(\"Wrong issuer.\")\n except crypt.AppIdentityError:\n # Invalid token\n return flask.Response('Application identity error.', 405)\n user_id = idinfo['sub']\n hash_id = hashlib.sha256(user_id).hexdigest()\n\n content_type = self.request.content_type\n\n datastore_client = self.datastore.Client(self.config['PROJECT_ID'])\n batch = datastore_client.batch()\n\n for file_ in self.request.files.getlist('datafile'):\n # In case an error occured and the filename was not sent\n # filename = self.request.headers.get(constants.HTTP_FILENAME_HEADER) or ''\n filename = file_.filename\n ext = self.os.path.splitext(filename)[1].strip('.')\n name = '.'.join((str(uuid4()), ext))\n\n entity = self._create_datastore_entry(datastore_client, name, user=hash_id)\n\n if entity:\n batch.put(entity)\n\n # Local file system file paths\n local_file = self.os.path.join(self._dir, name)\n temp_file = local_file + self._file_not_ready_suffix\n\n try:\n self._write_data_to_file(temp_file, file_)\n\n except IOError as e:\n self.logger.error('Error occured writing to file: {0}'.format(e))\n return self.Response('Failed to save file.',\n status=constants.HTTP_ERROR)\n\n except ClientDisconnected:\n # This error will occur if Gunicorn/Flask fails to respond before\n # the load balancer times the request out. 
In this situation, the\n # load balancer responds to the client with a 502 error, however\n # this is not detected by Flask until it reads to the end of the\n # buffered request from nginx at which point this exception will be\n # thrown by the call to self.request.stream.read in\n # _write_data_to_file.\n try:\n self.util.retry_func(self.os.remove, self._retrys,\n (OSError, ), temp_file)\n except RuntimeError:\n pass\n self.logger.error('Upload failed. Client disconnected.')\n return self.Response(status=constants.HTTP_ERROR)\n\n try:\n self.util.retry_func(self.os.rename, self._retrys,\n (OSError, ), temp_file, local_file)\n except RuntimeError:\n return self.Response('Failed to save file.',\n status=constants.HTTP_ERROR)\n\n try:\n batch.commit()\n except FailedToSaveToDatastoreError as e:\n self.logger.error(str(e))\n # Continue on for now. The upload daemon will create a datastore\n # entity if it doesn't find one, it will just be missing the user\n # information.\n\n return self.Response(status=constants.HTTP_OK)\n\n def _create_datastore_entry(self, datastore_client, filename, user=None):\n \"\"\"\n Creates and returns a datastore entity for a file with name filename\n uploaded by user user.\n Filename should be the new name we have generated that is <uuid>.<ext>.\n Raises FailedToSaveToDatastoreError if unsuccessful.\n \"\"\"\n # Create datastore entity\n key = datastore_client.key(self._datastore_kind, filename)\n entity = self.datastore.Entity(key=key)\n\n # Set datastore entity data\n entity['user'] = datastore_client.key(self._user_datastore_kind, user)\n entity['in_gcs'] = False\n entity['processed'] = False\n entity['uploaded_date'] = self.datetime.now()\n\n if not ds.validate_data(entity, True, ds.DATASTORE_PHOTO):\n msg = 'Invalid entity: {0}'.format(entity)\n return None\n\n return entity", "def post_location_job() -> None:\n if not Server.token:\n Server.login()\n\n try:\n location = GPSD.get_location()\n except Exception:\n logger.exception(\"Cannot acquire location\")\n return\n\n if Server.token:\n Server.send_unsent_locations()\n Server.post_location(location)\n else:\n Server.append_failed_location(location)", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def submit(self):\n\n # Get BlobUploader URL\n response = requests.get(SurlyfritterImage.UPLOAD_URL)\n form_action_url = re.findall(r'form action=\"(.*?)\"', response.text)[0]\n\n with open(self.fname, 'rb') as image_fh:\n files = {'file': image_fh}\n data = {}\n response = requests.post(form_action_url, files=files, data=data)\n response.raise_for_status()", "def upload_finish(self, cloud_file):", "def map_input():\n\n page_title(\"Generate Map\")\n st.sidebar.write('')\n\n config = load_config()\n \n page = st.sidebar.selectbox(\"Select a page\",\n [\"Map input\", \"View map\"])\n\n if page == \"Map input\":\n\n with st.form(key='map_input'):\n\n map_title = st.text_input('Title')\n zip_file = st.file_uploader('Select zip file', key='upload_doc', type=[\"zip\"])\n \n submit = st.form_submit_button('Submit')\n\n if submit:\n if not map_title:\n return st.error('Please enter title.')\n if zip_file is not None:\n myzipfile = zipfile.ZipFile(zip_file)\n myzipfile.extractall('asset/tmp')\n else:\n return st.error('Please select shape file.')\n\n shape_file_name = glob.glob('asset/tmp/'+zip_file.name[:-4]+'/*.shp')[0]\n\n if not config[\"arcgis_key\"]:\n return st.error('Arcgis authentication key missing.')\n\n try:\n\n gdf = gpd.read_file(shape_file_name)\n data = json.loads(gdf['geometry'].to_json())['features']\n\n # here we are getting javascript code to call arcgis library.\n arcgis_js = load_arcgis_js()\n\n matches = re.finditer('<<(\\w*)>>', arcgis_js)\n\n for i in matches:\n\n if i.group(0) == \"<<title>>\":\n arcgis_js = arcgis_js.replace(i.group(0), map_title)\n \n if i.group(0) == \"<<api_key>>\":\n arcgis_js = arcgis_js.replace(i.group(0), config[\"arcgis_key\"])\n \n if i.group(0) == \"<<cordi>>\":\n arcgis_js = arcgis_js.replace(i.group(0), json.dumps(data[0]['geometry']['coordinates'][0][0][0][:2]))\n \n if i.group(0) == \"<<multi_ploy_list>>\":\n arcgis_js = arcgis_js.replace(i.group(0), json.dumps(data[0]['geometry']['coordinates'][0]))\n\n map_file_name = \"asset/html/\"+zip_file.name[:-4]+\".html\"\n file_dump = open(map_file_name,\"w\")\n file_dump.write(arcgis_js) \n 
file_dump.close()\n\n hti = Html2Image(custom_flags=['--virtual-time-budget=10000', '--hide-scrollbars'],output_path='asset/images/')\n map_image_name = map_title+\"_\"+str(datetime.today())+\"_\"+zip_file.name[:-4]+'.png'\n hti.screenshot(html_file=map_file_name,save_as=map_image_name,size=(946,575))\n\n st.image('asset/images/'+map_image_name)\n\n except Exception as e:\n return st.error(\"There was some issue in generating map.\")\n\n else:\n\n images_path = 'asset/images'\n\n images = os.listdir(images_path)\n\n image = st.selectbox(\"Select an image\",images)\n\n st.write('')\n st.markdown(\"<b>Map title</b> : \"+image.split(\"_\")[0], unsafe_allow_html=True)\n st.write('')\n st.markdown(\"<b>Map created at</b> : \"+image.split(\"_\")[1].split(\".\")[0], unsafe_allow_html=True)\n st.write('')\n\n st.image(images_path+\"/\"+image)", "def submit_plugin_form_data(self, form_entry, request, form,\n form_element_entries=None, **kwargs):", "def on_post(self, req, resp, account, container):\n _handle_script_upload(req, resp, account, container)", "def uploadData(self):", "def process(submitted_doc):\n\n submitted_data = submitted_doc.data\n url = submitted_data.get('originalUrl')\n if not url:\n raise ProcessingInputError('POST[\"data\"] does not have \"originalUrl\" set!')\n url = url.strip()\n\n doc_file = download(url)\n if not doc_file.tika_data:\n with doc_file.open() as f:\n doc_file.tika_data = tika_parse(f)\n doc_file.save()\n\n data = dict(submitted_data.items())\n data.update(doc_file.tika_data)\n\n doc, created = models.Document.objects.update_or_create(\n file_url=url,\n parsed=data,\n submit=submitted_doc,\n file=doc_file,\n )\n\n return doc, created", "def upload_file_geocode(self, **kwargs):\n kwargs.update({'api_key': self.params['api_key'], })\n if self.check_required_params(kwargs, ['strUploadID', 'files', ]):\n self.response = self.api._request_post(FILE_UPLOAD_GEOCODE_HOST,\n kwargs,\n files=kwargs.pop('files'))\n return self.response.content\n else:\n raise ParamValueException('params', 'Params are not complete')", "def process_request(self, request):\n request.file_uploads = {}\n\n # we only care about POST and which has form data with file.\n if request.method == 'POST' and (\n 'multipart/form-data' in request.META.get('CONTENT_TYPE', '')):\n\n testbed = request.META[TESTBED_ARG_KEY]\n wsgi_input = request.META['wsgi.input']\n wsgi_input.seek(0)\n\n fields = cgi.FieldStorage(wsgi_input, environ=request.META)\n\n for key in fields:\n field = fields[key]\n if isinstance(field, cgi.FieldStorage):\n if ('content-disposition' in field.headers and\n 'filename' in field.disposition_options):\n\n # create a mock blob info and assign it to request data\n filename = field.disposition_options['filename']\n blob_entity = testbed.get_stub('blobstore').CreateBlob(\n filename, 'fake content')\n\n # set other properties of blob info\n blob_entity['filename'] = filename\n blob_entity['content_type'] = field.headers['content-type']\n datastore.Put(blob_entity)\n\n blob_info = blobstore.BlobInfo(blob_entity)\n\n # set request data\n request.file_uploads[key] = blob_info\n request.POST[key] = filename\n\n # format blob info for Django by adding the name property.\n blob_info.name = field.disposition_options['filename']", "def create_post_data(self):\n ad = self.document_url.advancedirective\n unit_data = {\n 'full_name': ad.user.get_full_name(),\n 'state': ad.user.userdetails.state.name if ad.user.userdetails.state else None,\n 'pdf_url': 'https://%s%s' % (self.document_host, 
self.document_url.url),\n 'date_signed': str(ad.valid_date),\n }\n # Only submit non-blank values, to ensure valid input. (CloudFactory accepts blank values\n # on POST, even though those values are invalid, but does not accept missing keys.)\n # (The User's state can be null in our models. Others are ensured by model validations.)\n\n unit_post_data = {k: v for k, v in unit_data.items() if bool(v) is True}\n\n # When the auth callback is provided, create a callback URL with Basic\n # auth credentials:\n auth = ''\n if settings.CLOUDFACTORY_CALLBACK_AUTH:\n auth = '%s@' % settings.CLOUDFACTORY_CALLBACK_AUTH\n data = {\n \"line_id\": settings.CLOUDFACTORY_LINE_ID,\n \"callback_url\":\n 'https://%s%s%s' % (\n auth, self.document_host,\n reverse('myhpom:cloudfactory_response')),\n \"units\": [unit_post_data],\n }\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download pdf of VanTechy presentation slideshow.
def vantechy(request): return FileResponse(open('/files/presentation.pdf', 'rb'))
[ "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def download(filename):\n return send_from_directory(directory='pdf', filename=filename)", "def download(urls, target_dir):\n valid_urls = FileDownloadAndCombiner.filter_valid_urls(urls)\n print('Downloading {} papers'.format(len(valid_urls)))\n for data in tqdm(valid_urls[:]):\n url = data['url'].replace('abs', 'pdf') + '.pdf'\n filename = os.path.join(target_dir, data['title'] + '.pdf')\n filename = filename.replace('\"', '').replace('/', '')\n print(url)\n print(filename)\n download(url, filename)", "def download_paper(doi, path=\"~/Downloads/\"):\n base_url = \"https://sci-hub.do/\"+ doi\n res = requests.get(base_url, verify=False)\n s = BeautifulSoup(res.content, 'html.parser')\n\n iframe = s.find('iframe')\n if iframe:\n url = iframe.get('src')\n \n filename = url.split(\"/\")[-1].split(\"#\")[0]\n filepath = path + filename\n\n print(\"====== Dowloading article from Sci-Hub ======\")\n\n os.system(f'wget -O {filepath} {url}' )\n\n print(\"====== Opening PDF ======\")\n os.system(f'xdg-open {filepath} &')\n\n return 0", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download_presentation(epObject, uc):\r\n fileDict = make_file_dict()\r\n fileDict = populate_file_dict(epObject, uc, fileDict)\r\n now = str(datetime.datetime.now().hour) + \\\r\n str(datetime.datetime.now().minute) + \\\r\n str(datetime.datetime.now().second)\r\n directoryName = epObject.Name.replace(\" \", \"\") + \"_presentation_\" + now\r\n os.mkdir(directoryName)\r\n os.chdir(directoryName)\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(fileDict['pageUrls'][0]).read())\r\n temp.seek(0)\r\n update_page(temp, fileDict, \"index.html\", index=True)\r\n temp.close()\r\n os.mkdir(\"Pages\")\r\n os.chdir(\"Pages\")\r\n for (pageUrl, pageFileName) in zip(fileDict['pageUrls'][1:], \r\n fileDict['pageFileNames'][1:]):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(pageUrl).read())\r\n update_page(temp, fileDict, pageFileName)\r\n temp.close()\r\n os.chdir(\"../\")\r\n os.mkdir(\"Content\")\r\n os.chdir(\"Content\")\r\n for (fileUrl, fileId) in zip(fileDict['fileUrls'], fileDict['fileIds']):\r\n fileName = eportfolio.get_ep_object_properties(uc, fileId).\\\r\n FileName.strip()\r\n urllib.request.urlretrieve(fileUrl, fileName)\r\n os.chdir(\"../\")\r\n os.mkdir(\"Formatting\")\r\n os.chdir(\"Formatting\")\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(cssUrl).read())\r\n temp.seek(0)\r\n update_css_file(cssUrl, temp, cssFileName)\r\n temp.close()\r\n for imgUrl in fileDict['imgUrls']:\r\n fileName = imgUrl[imgUrl.rfind(\"/\"): ]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[: fileName.find(\"?\")]\r\n urllib.request.urlretrieve(imgUrl, fileName)\r\n os.chdir(\"../\")\r\n print(str(fileDict))\r\n return fileDict", "def serve_pdf(which, page):\n return send_file(build_path(which, 'pages', page))", "def generate_pdf():\n # replace the API url with the one you just created\n url = \"https://wf5u5hrui6.execute-api.us-west-2.amazonaws.com/default/pdf_generation\"\n response = requests.post(url)\n return response.text", "def download(self):\n self.build_pdf()\n 
with open(self.pdf_file, 'rb') as pdf:\n return pdf.read(), 200, {'Content-Type': 'application/pdf',\n 'Content-Disposition': 'attachment; filename=\"Invoice #{}.pdf\"'.format(self.name)}", "def test_pdf(client):\n urls = [\n reverse('pdf'),\n '%s?download' % reverse('pdf-inline'),\n ]\n for url in urls:\n response = client.get(url)\n\n assert response.status_code == 200\n\n assert response.has_header('content-disposition')\n assert response.has_header('content-length')\n assert response.has_header('content-type')\n\n assert response['content-disposition'] == 'attachment; filename=basic.pdf'\n assert int(response['content-length']) > 0\n assert response['content-type'] == 'application/pdf'", "def download_pdfs_from_site(url: str, verbose=True):\n site_url = get_site_url(url)\n html = requests.get(url).text\n\n\n all_links = get_links(html)\n pdf_links = [link for link in all_links if link.endswith('pdf')]\n pdf_links = maybe_add_full_links(pdf_links, site_url)\n \n if verbose:\n print('Found the following pdf links')\n print(pdf_links)\n pdf_links = tqdm.tqdm(pdf_links)\n for link in pdf_links:\n download_from_link(link)", "def tutorial(request):\n try:\n file_path = (settings.BASE_DIR\n + '/website_files/metropolis_tutorial.pdf')\n with open(file_path, 'rb') as f:\n response = HttpResponse(f, content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"how_to.pdf\"'\n return response\n except FileNotFoundError:\n # Should notify an admin that the file is missing.\n raise Http404()", "def download_latex(self):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. \n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n return downloaded_data\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))", "def getPresentation():\n if len(sys.argv) != 2:\n return \"Proper arguments not provided. Aborting!!\"\n\n pathToPaper = sys.argv[1]\n\n if os.path.isfile(pathToPaper) is False:\n return \"Specified file does not exists. Aborting!!\"\n\n raw_sections = getSections(\"sections.json\", pathToPaper)\n sections = joinSections(raw_sections)\n\n collection = []\n for section in sections:\n ID = section['section']\n title = section['title']\n text = section['text']\n\n if ID == \"Section: 0\":\n slide = genTitleSlide(title)\n else:\n slide = genTextSlide(ID, title, text)\n if slide is not None:\n collection.append(slide)\n\n # Check if any images exist for this section.\n for image in images:\n if image['section'] == ID:\n #image belongs to this section. Add its slide\n slide = genImgSlide(image)\n collection.append(slide)\n \n # Check if any tables exist for this section.\n for table in tables:\n if table['section'] == ID:\n #table belongs to this section. 
Add its slide\n slide = genTableSlide(table)\n if slide is not None:\n collection.append(slide)\n\n\n filename = \"presentation.tex\"\n genLatex(collection, filename)\n genPDF(filename)\n \n return \"PDF successfully printed to `presentation.pdf`.\"", "def download_pdf(pdf_url):\n response = requests.get(pdf_url, allow_redirects=True)\n open('./data/raw/full.pdf', 'wb').write(response.content)", "def download_pdf_file(download_url):\n web_file = urllib.urlopen(download_url)\n filename = \"/tmp/\" + str(uuid.uuid4()) + \".pdf\"\n local_file = open(filename, 'w')\n local_file.write(web_file.read())\n web_file.close()\n local_file.close()\n return filename", "def get_ppdf(conf,inventory):\n channels=channel_list(inv)\n for chan in channels:\n fname=pdfpath(chan,conf)\n if not os.path.exists(fname):\n try:\n txt=irisws.noise_pdf.query(target=chan+'.M',starttime=conf['starttime'],endtime=conf['endtime'],format='text')\n txt=txt.decode('utf-8')\n if txt=='':\n logging.warn(\"First attempt to get PPDF for %s failed\" % (chan))\n txt=irisws.noise_pdf.query(target=chan+'.M',starttime=conf['starttime'],endtime=conf['endtime'],format='text')\n txt=txt.decode('utf-8')\n else:\n logging.debug(\"Got data for %s now save to file %s\" % (chan,fname))\n ofh=open(fname,'w')\n ofh.write(txt)\n ofh.close()\n except:\n logging.warn(\"Unable to download or save PPDF for %s\" % (chan))\n return len(channels)", "def download_pdfs():\n try:\n # create the download folder if it does not exist already\n Path(paho_raw_reports_dir).mkdir(parents=True, exist_ok=True)\n # remove all current pdfs in the download folder\n filelist = [ f for f in os.listdir(paho_raw_reports_dir) if f.endswith(\".pdf\") ]\n for f in filelist:\n os.remove(os.path.join(paho_raw_reports_dir, f))\n # open the browser\n logging.info(\"Now opening the Firefox browser\")\n options = Options()\n options.headless = True\n options.accept_insecure_certs = True\n profile = FirefoxProfile()\n profile.set_preference('security.tls.version.enable-deprecated', True)\n # set the download location of the pdfs and remove the download prompt\n profile.set_preference(\"browser.altClickSave\", True)\n profile.set_preference(\"browser.download.folderList\", 2)\n profile.set_preference(\"browser.download.panel.shown\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False)\n profile.set_preference(\"browser.download.dir\", paho_raw_reports_dir)\n profile.set_preference(\"browser.download.useDownloadDir\", True)\n profile.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \n \"application/pdf,application/x-pdf,application/octet-stream,application/x-winzip,application/x-gzip\")\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"browser.download.manager.focusWhenStarting\", False);\n profile.set_preference(\"browser.helperApps.alwaysAsk.force\", False);\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False);\n profile.set_preference(\"browser.download.manager.closeWhenDone\", True);\n profile.set_preference(\"browser.download.manager.showAlertOnComplete\", False);\n profile.set_preference(\"browser.download.manager.useWindow\", False);\n profile.set_preference(\"services.sync.prefs.sync.browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"pdfjs.disabled\", True)\n driver = webdriver.Firefox(profile, options=options)\n # Go the PAHO website that holds 
the reports\n reports_present_on_page = True\n page_number = 0\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n while reports_present_on_page:\n logging.info(\"Navigating to \"+pahoreporturl)\n driver.get(pahoreporturl)\n # get all urls containing certain keywords on this page\n report_links_elements = driver.find_elements_by_partial_link_text(\"COVID-19 cases\")\n # store all of the urls in each element\n report_links = []\n for report_link_element in report_links_elements:\n report_links.append(report_link_element.get_attribute('href'))\n # now go through each url in the list\n for report_link in report_links:\n # navigate to each url\n driver.get(report_link)\n # once the page has loaded, click the download link\n download_link = driver.find_element_by_link_text(\"DOWNLOAD\")\n download_link.click()\n logging.info(\"File downloaded from: \"+download_link.get_attribute('href'))\n # check if we have any elements that we're interested in on this page, to control the loop\n if report_links_elements:\n reports_present_on_page = True\n page_number += 1\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n else:\n reports_present_on_page = False\n logging.info(\"No more reports on page. Breaking loop.\")\n return 0\n except:\n logging.info(\"Encountered an issue while trying to download the pdfs.\")\n raise\n finally:\n if 'driver' in locals() and driver is not None:\n # Always close the browser\n driver.quit()\n logging.info(\"Successfully closed web browser.\")\n logging.info(\"Completed downloading of all COVID19 pdfs from PAHO website.\")", "def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all available charts
def list_charts(): charts_root = Path(R".\charm\data\charts") charts = list(charts_root.rglob("*.chart")) return charts
[ "def charts(self, **kwargs):\n return [Chart(result) for result in self._invoke('charts', kwargs)]", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def charts(self):\n return self._charts", "def get_chart_list(host: str = '127.0.0.1:19999', starts_with: str = None) -> list:\n\n url = f\"http://{host}/api/v1/charts\"\n r = requests.get(url)\n charts = r.json().get('charts')\n chart_list = [chart for chart in charts]\n if starts_with:\n chart_list = [chart for chart in chart_list if chart.startswith(starts_with)]\n return chart_list", "def getCharts(self):\n \n return self.testCharts", "def get_available_datasets() -> List[Dataset]:\n res = requests.get(\"https://data.kockatykalendar.sk/index.json\")\n if res.status_code != 200:\n raise ConnectionError(\"KockatyKalendar.sk API returned wrong status code. 200 != %d.\" % res.status_code)\n\n return [Dataset(ds) for ds in res.json()]", "def charts(self):\n return self.properties.get('charts',\n EntityCollection(self.context, WorkbookChart,\n ResourcePath(\"charts\", self.resource_path)))", "def charts(self, charts):\n\n self.container['charts'] = charts", "def list_graphs():\n\n db_inst = DB()\n db_inst.get_database()\n graphs = db_inst.database.graphs()\n graphs = util.filter_graphs(graphs)\n\n return graphs", "def get_charts(self, period=\"d\", size=\"l\", chart_type=\"c\", ta=\"1\"):\n\n encoded_payload = urlencode(\n {\"ty\": chart_type, \"ta\": ta, \"p\": period, \"s\": size}\n )\n\n sequential_data_scrape(\n scrape.download_chart_image,\n [\n f\"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}\"\n for row in self.data\n ],\n self._user_agent,\n )", "def my_charts(page_num=1):\n # Download charts that belong to the current user\n charts = Chart.query.filter_by(owner_id=current_user.id).paginate(page_num)\n return render_template('reports/my_charts.html', charts=charts)", "def load_supported_charts_dashboard() -> None:\n\n database = get_example_database()\n with database.get_sqla_engine_with_context() as engine:\n schema = inspect(engine).default_schema_name\n\n tbl_name = \"birth_names\"\n table_exists = database.has_table_by_name(tbl_name, schema=schema)\n\n if table_exists:\n table = get_table_connector_registry()\n obj = (\n db.session.query(table)\n .filter_by(table_name=tbl_name, schema=schema)\n .first()\n )\n create_slices(obj)\n\n print(\"Creating the dashboard\")\n\n db.session.expunge_all()\n dash = db.session.query(Dashboard).filter_by(slug=DASH_SLUG).first()\n\n if not dash:\n dash = Dashboard()\n\n js = textwrap.dedent(\n \"\"\"\n{\n \"CHART-1\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-1\"\n ],\n \"id\": \"CHART-1\",\n \"meta\": {\n \"chartId\": 1,\n \"height\": 50,\n \"sliceName\": \"Big Number\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-2\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-1\"\n ],\n \"id\": \"CHART-2\",\n \"meta\": {\n \"chartId\": 2,\n \"height\": 50,\n \"sliceName\": \"Big Number with Trendline\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-3\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-1\"\n ],\n \"id\": \"CHART-3\",\n \"meta\":{\n \"chartId\": 3,\n \"height\": 50,\n \"sliceName\": \"Table\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-4\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n 
\"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-2\"\n ],\n \"id\": \"CHART-4\",\n \"meta\": {\n \"chartId\": 4,\n \"height\": 50,\n \"sliceName\": \"Pivot Table\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-5\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-2\"\n ],\n \"id\": \"CHART-5\",\n \"meta\": {\n \"chartId\": 5,\n \"height\": 50,\n \"sliceName\": \"Time-Series Line Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-6\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-2\"\n ],\n \"id\": \"CHART-6\",\n \"meta\": {\n \"chartId\": 6,\n \"height\": 50,\n \"sliceName\": \"Time-Series Bar Chart V2\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-7\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-3\"\n ],\n \"id\": \"CHART-7\",\n \"meta\": {\n \"chartId\": 7,\n \"height\": 50,\n \"sliceName\": \"Time-Series Area Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-8\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-3\"\n ],\n \"id\": \"CHART-8\",\n \"meta\": {\n \"chartId\": 8,\n \"height\": 50,\n \"sliceName\": \"Time-Series Scatter Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-9\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-3\"\n ],\n \"id\": \"CHART-9\",\n \"meta\": {\n \"chartId\": 9,\n \"height\": 50,\n \"sliceName\": \"Pie Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-10\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-4\"\n ],\n \"id\": \"CHART-10\",\n \"meta\": {\n \"chartId\": 10,\n \"height\": 50,\n \"sliceName\": \"Bar Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-11\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\",\n \"ROW-4\"\n ],\n \"id\": \"CHART-11\",\n \"meta\": {\n \"chartId\": 11,\n \"height\": 50,\n \"sliceName\": \"% Rural\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-12\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-5\"\n ],\n \"id\": \"CHART-12\",\n \"meta\": {\n \"chartId\": 12,\n \"height\": 50,\n \"sliceName\": \"Box Plot Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-13\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-5\"\n ],\n \"id\": \"CHART-13\",\n \"meta\": {\n \"chartId\": 13,\n \"height\": 50,\n \"sliceName\": \"Bubble Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-14\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-5\"\n ],\n \"id\": \"CHART-14\",\n \"meta\": {\n \"chartId\": 14,\n \"height\": 50,\n \"sliceName\": \"Calendar Heatmap\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-15\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-6\"\n ],\n \"id\": \"CHART-15\",\n \"meta\": {\n \"chartId\": 15,\n \"height\": 50,\n \"sliceName\": \"Chord Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-16\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-6\"\n ],\n \"id\": \"CHART-16\",\n \"meta\": {\n \"chartId\": 16,\n \"height\": 50,\n \"sliceName\": \"Time-Series Percent Change Chart\",\n \"width\": 4\n },\n \"type\": 
\"CHART\"\n },\n \"CHART-17\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-6\"\n ],\n \"id\": \"CHART-17\",\n \"meta\": {\n \"chartId\": 17,\n \"height\": 50,\n \"sliceName\": \"Time-Series Generic Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-18\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-7\"\n ],\n \"id\": \"CHART-18\",\n \"meta\": {\n \"chartId\": 18,\n \"height\": 50,\n \"sliceName\": \"Time-Series Smooth Line Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-19\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-7\"\n ],\n \"id\": \"CHART-19\",\n \"meta\": {\n \"chartId\": 19,\n \"height\": 50,\n \"sliceName\": \"Time-Series Step Line Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-20\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-7\"\n ],\n \"id\": \"CHART-20\",\n \"meta\": {\n \"chartId\": 20,\n \"height\": 50,\n \"sliceName\": \"Funnel Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-21\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-8\"\n ],\n \"id\": \"CHART-21\",\n \"meta\": {\n \"chartId\": 21,\n \"height\": 50,\n \"sliceName\": \"Gauge Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-22\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-8\"\n ],\n \"id\": \"CHART-22\",\n \"meta\": {\n \"chartId\": 22,\n \"height\": 50,\n \"sliceName\": \"Heatmap Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-23\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-8\"\n ],\n \"id\": \"CHART-23\",\n \"meta\": {\n \"chartId\": 23,\n \"height\": 50,\n \"sliceName\": \"Line Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-24\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-9\"\n ],\n \"id\": \"CHART-24\",\n \"meta\": {\n \"chartId\": 24,\n \"height\": 50,\n \"sliceName\": \"Mixed Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-25\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-9\"\n ],\n \"id\": \"CHART-25\",\n \"meta\": {\n \"chartId\": 25,\n \"height\": 50,\n \"sliceName\": \"Partition Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-26\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-9\"\n ],\n \"id\": \"CHART-26\",\n \"meta\": {\n \"chartId\": 26,\n \"height\": 50,\n \"sliceName\": \"Radar Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-27\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-10\"\n ],\n \"id\": \"CHART-27\",\n \"meta\": {\n \"chartId\": 27,\n \"height\": 50,\n \"sliceName\": \"Nightingale Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-28\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-10\"\n ],\n \"id\": \"CHART-28\",\n \"meta\": {\n \"chartId\": 28,\n \"height\": 50,\n \"sliceName\": \"Sankey Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-29\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-10\"\n ],\n \"id\": \"CHART-29\",\n \"meta\": {\n 
\"chartId\": 29,\n \"height\": 50,\n \"sliceName\": \"Sunburst Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-30\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-11\"\n ],\n \"id\": \"CHART-30\",\n \"meta\": {\n \"chartId\": 30,\n \"height\": 50,\n \"sliceName\": \"Treemap Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-31\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-11\"\n ],\n \"id\": \"CHART-31\",\n \"meta\": {\n \"chartId\": 31,\n \"height\": 50,\n \"sliceName\": \"Treemap V2 Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-32\": {\n \"children\": [],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\",\n \"ROW-11\"\n ],\n \"id\": \"CHART-32\",\n \"meta\": {\n \"chartId\": 32,\n \"height\": 50,\n \"sliceName\": \"Word Cloud Chart\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"GRID_ID\": {\n \"children\": [],\n \"id\": \"GRID_ID\",\n \"type\": \"GRID\"\n },\n \"HEADER_ID\": {\n \"id\": \"HEADER_ID\",\n \"meta\": {\n \"text\": \"Supported Charts\"\n },\n \"type\": \"HEADER\"\n },\n \"TABS-TOP\": {\n \"children\": [\n \"TAB-TOP-1\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"TABS-TOP\",\n \"type\": \"TABS\"\n },\n \"TAB-TOP-1\": {\n \"id\": \"TAB_TOP-1\",\n \"type\": \"TAB\",\n \"meta\": {\n \"text\": \"Tier 1\",\n \"defaultText\": \"Tab title\",\n \"placeholder\": \"Tab title\"\n },\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\"\n ],\n \"children\": [\n \"ROW-1\",\n \"ROW-2\",\n \"ROW-3\",\n \"ROW-4\"\n ]\n },\n \"TAB-TOP-2\": {\n \"id\": \"TAB_TOP-2\",\n \"type\": \"TAB\",\n \"meta\": {\n \"text\": \"Tier 2\",\n \"defaultText\": \"Tab title\",\n \"placeholder\": \"Tab title\"\n },\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\"\n ],\n \"children\": [\n \"ROW-5\",\n \"ROW-6\",\n \"ROW-7\",\n \"ROW-8\",\n \"ROW-9\",\n \"ROW-10\",\n \"ROW-11\"\n ]\n },\n \"ROOT_ID\": {\n \"children\": [\n \"TABS-TOP\"\n ],\n \"id\": \"ROOT_ID\",\n \"type\": \"ROOT\"\n },\n \"ROW-1\": {\n \"children\": [\n \"CHART-1\",\n \"CHART-2\",\n \"CHART-3\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\"\n ],\n \"id\": \"ROW-1\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-2\": {\n \"children\": [\n \"CHART-4\",\n \"CHART-5\",\n \"CHART-6\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\"\n ],\n \"id\": \"ROW-2\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-3\": {\n \"children\": [\n \"CHART-7\",\n \"CHART-8\",\n \"CHART-9\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\"\n ],\n \"id\": \"ROW-3\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-4\": {\n \"children\": [\n \"CHART-10\",\n \"CHART-11\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-1\"\n ],\n \"id\": \"ROW-4\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-5\": {\n \"children\": [\n \"CHART-12\",\n \"CHART-13\",\n \"CHART-14\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-5\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-6\": {\n \"children\": [\n \"CHART-15\",\n \"CHART-16\",\n \"CHART-17\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-6\",\n \"meta\": {\n \"background\": 
\"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-7\": {\n \"children\": [\n \"CHART-18\",\n \"CHART-19\",\n \"CHART-20\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-7\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-8\": {\n \"children\": [\n \"CHART-21\",\n \"CHART-22\",\n \"CHART-23\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-8\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-9\": {\n \"children\": [\n \"CHART-24\",\n \"CHART-25\",\n \"CHART-26\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-9\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-10\": {\n \"children\": [\n \"CHART-27\",\n \"CHART-28\",\n \"CHART-29\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-10\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-11\": {\n \"children\": [\n \"CHART-30\",\n \"CHART-31\",\n \"CHART-32\"\n ],\n \"parents\": [\n \"ROOT_ID\",\n \"TABS-TOP\",\n \"TAB-TOP-2\"\n ],\n \"id\": \"ROW-11\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"DASHBOARD_VERSION_KEY\": \"v2\"\n}\n \"\"\"\n )\n\n pos = json.loads(js)\n dash.slices = update_slice_ids(pos)\n dash.dashboard_title = \"Supported Charts Dashboard\"\n dash.position_json = json.dumps(pos, indent=2)\n dash.slug = DASH_SLUG\n db.session.commit()", "def process_charts(self):\n\n # Prints the chart list to console\n self.print_data_list()\n self.save_data_list('Duval')", "def chart_finder(self, keyword):\n\n data, _ = self.helm_client.search(keyword)\n return data", "def charts(self, charts):\n\n self._charts = charts", "def available_plots(self):\n return self.visualizer.available_plots()", "def get_charts(self):\n\n from stockprediction.machine_learning import StockMachineLearning\n from stockprediction.utils import dataframe_utils as df_utils\n\n dataset = self.get_data()\n ti_plot = self.plot_technical_indicators()\n\n ml_dataset = self.get_ml_data()\n ml_dataset = df_utils.format_stock_dataset_for_ml(dataset)\n ml = StockMachineLearning(ml_dataset, self.ticker)\n test_plot = ml.plot_test_predictions()\n fut_plot = ml.plot_future_predictions\n\n return ti_plot, test_plot, fut_plot", "def test_read_charts(self, chart, charts):\n self.chart = charts\n chart_objects = chart.objects.all()\n if not chart_objects:\n raise AssertionError(\"Could not read charts.\")", "def get_all():\n return Dashboard.query.all()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Convert a chart Path object to a string path relative to .\charm\data\charts
def strch(chart): charts_root = Path(R".\charm\data\charts") return str(chart.relative_to(charts_root))
[ "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def path(self):\n if self._path is None:\n self._path = Path(\n os.environ.get(\n \"RAPIDS_DATASET_ROOT_DIR\", Path.home() / \".cugraph/datasets\"\n )\n )\n return self._path", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def resolve_path_o(model_dict):\n\n dataset = model_dict['dataset']\n channels = model_dict['channels']\n script_dir = dirname(dirname(dirname(os.path.abspath(__file__))))\n rel_path_o = 'output_data/' + dataset\n if dataset == 'GTSRB':\n rel_path_o += str(channels)\n abs_path_o = os.path.join(script_dir, rel_path_o + '/')\n if not os.path.exists(abs_path_o):\n os.makedirs(abs_path_o)\n return abs_path_o", "def get_refds_path(cls, dataset):\n # theoretically a dataset could come in as a relative path -> resolve\n if dataset is None:\n return dataset\n refds_path = dataset.path if isinstance(dataset, Dataset) \\\n else Dataset(dataset).path\n if refds_path:\n refds_path = str(resolve_path(refds_path))\n return refds_path", "def get_chart_file(j2_file_name):\n return os.path.join(get_path(), 'charts', j2_file_name)", "def ypath_from_sheet_to_base(self, sheet):\n ypath = self.ypath_from_base_to_sheet(sheet)\n return self.ypath_values_reverse(ypath)", "def data_path(path: object) -> str:\n return f\"data/{path}\"", "def get_dataset_path(dataset):\n data_dir = get_dataset_dir()\n dataset_path = pathlib.Path(data_dir).joinpath(dataset)\n return str(dataset_path)", "def _to_absolute_path(v: Union[str, Path]):\n return to_absolute_path(v)", "def get_relative_path(self, path):\n if isinstance(path, str):\n path = pathlib.Path(path)\n\n rpath = path.relative_to(self.root)\n return str(rpath)", "def get_realpath(cls, path_str):\n if path_str.startswith('/'):\n return path_str\n return os.path.abspath(os.path.join(cls.apollo_root, path_str))", "def to_path(self):\n return self._to_path", "def path(self):\n if self.origin:\n return six.text_type(self.origin / self.relpath)\n else:\n return six.text_type(self.relpath)", "def path(self) -> str:\n return (\n f\"/projects/{self.project}/datasets/{self.dataset_id}\"\n f\"/tables/{self.table_id}\"\n )", "def path_to_str(source: pathlib.Path) -> str:\n return str(source)", "def to_html_path(graph: BELGraph, path: str, chart: Optional[str] = None) -> None:\n with open(path, 'w') as file:\n to_html_file(graph=graph, file=file, chart=chart)", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))", "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the map grid cell as obstacle
def set_obstacle(self, pos: tuple): if self.within_map(pos): self.map[round(pos[0]), round(pos[1])] = OBSTACLE return True else: return False
[ "def set_obstacle(self):\n self.state = self.Obstacle", "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def put(self, cell):\n if cell.x >= 0 and cell.x < len(self._grid[0]) and \\\n cell.y >= 0 and cell.y < len(self._grid):\n self._grid[cell.y][cell.x] = cell.c", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def set_cell(self,i,j,cell):\n self.board[i][j] = cell", "def set_cell(self, cell, value):\n x,y = cell\n self.grid[y][x] = value", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def neighborhood(self, cell):\n pass", "def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n \r\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value\n pass", "def set_onmap(self, mapping):\n mapping.set_bad_guy(self.y_pos, self.x_pos)", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def set_grid (self,value = None): \n for col in range(self.num_cols):\n for row in range(self.num_rows):\n if value == None :\n cell_value = random.choice([0,1])\n else :\n cell_value = value\n self.grids[self.active_grid][row][col] = cell_value", "def __draw_obstacles(self):\n self.canvas.delete(OBSTACLES_TAG)\n for i in range(0, PACMAN_BOARD_SIDE_SQUARES_NUMBER):\n for j in range(0, PACMAN_BOARD_SIDE_SQUARES_NUMBER):\n if self.board[j][i] == 1:\n self.canvas.create_rectangle(\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * i,\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * j,\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * (i + 1),\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * (j + 1),\n fill=BOARD_OBSTACLES_COLOR,\n tag=OBSTACLES_TAG\n )", "def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the main script for the bigmacc process. It iterates through various CEA and bigmacc operations for each key (i.e. 01011101). It ends by saving a sample of the hourly results across the key for each building in a netcdf and then wiping the project files to reset them for the next iteration.
def run(config): locator = cea.inputlocator.InputLocator(config.scenario) print('Key in run') print(config.bigmacc.key) i = config.bigmacc.key print(i) # SCENARIO SETUP --- config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i) print(config.general.project) cea.datamanagement.data_initializer.main(config) # use the scenario code to set the year for the lca and other operations that need the current year pathway_code = config.general.parent pathway_items = pathway_code.split('_') scenario_year = int(pathway_items[1]) config.emissions.year_to_calculate = scenario_year bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round) scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') experiment_key = 'exp_{}'.format(i) print(experiment_key) keys = [int(x) for x in str(i)] if experiment_key in scen_check['Experiments'].values.tolist(): print('Experiment was finished previously, moving to next.') pass else: print('START: experiment {}.'.format(i)) # INITIALIZE TIMER --- t0 = time.perf_counter() if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)): print(' - Folder exists for experiment {}.'.format(i)) else: os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i)) print(' - Folder does not exist for experiment {}, creating now.'.format(i)) # run the archetype mapper to leverage the newly loaded typology file and set parameters print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i)) cea.datamanagement.archetypes_mapper.main(config) # run the rule checker to set the scenario parameters print(' - Running rule checker for experiment {}.'.format(i)) cea.bigmacc.bigmacc_rules.main(config) # SIMULATIONS --- print(' - Run radiation is {}.'.format(config.bigmacc.runrad)) print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data)) # checking on need for radiation simulation if config.bigmacc.runrad == True: # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation if config.bigmacc.rerun != True: print(' - Running radiation simulation for experiment {}.'.format(i)) if os.path.exists(locator.get_radiation_building('B000')): print(' - Radiation folder exists for experiment {}, copying.'.format(i)) else: print(' - Radiation running for experiment {}.'.format(i)) cea.resources.radiation_daysim.radiation_main.main(config) else: # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i)) old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'solar-radiation') # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder()) else: radfiles = config.bigmacc.copyrad # print(' - Copying radiation results from {}.'.format(radfiles)) # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder()) print(' - Experiment {} does not require new radiation simulation.'.format(i)) # running demand forecasting if os.path.exists(locator.get_schedule_model_file('B000')): print(' - Schedules exist for experiment {}.'.format(i)) else: print(' - Schedule maker running for experiment {}.'.format(i)) schedule_maker.main(config) # check to see if we need to rerun demand or if we can copy if config.bigmacc.rerun != True: print(' - Running demand simulation for experiment {}.'.format(i)) 
cea.demand.demand_main.main(config) else: if keys[0] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) elif keys[6] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) else: cea.demand.demand_main.main(config) # print(' - Looking for demand results data from previous run for experiment {}.'.format(i)) # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i, # config.general.scenario_name, 'outputs', 'data', 'demand') # if os.path.exists(old_demand_files): # # print(' - Copy demand results files from previous run of experiment {}.'.format(i)) # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder()) # pass # else: # print(' - No results found.') # print(' - Running demand simulation for experiment {}.'.format(i)) # cea.demand.demand_main.main(config) if config.bigmacc.pv == True: print(' - Run PV is {}.'.format(config.bigmacc.pv)) if config.bigmacc.rerun == True: print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i)) old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar') if os.path.exists(old_pv_files): # print(' - Copying PV files from previous run of experiment {}.'.format(i)) # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder()) pass else: print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files)) print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) else: # if PV simulation is needed, run it. print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) print('Run water-body exchange is {}.'.format(config.bigmacc.water)) # if water-body simulation is needed, run it. if config.bigmacc.water == True: print(' - Running water body simulation for experiment {}.'.format(i)) water.main(config) # recalculating the supply split between grid and ng in the websrook DH if keys[4] == 1: print(' - Do not run district heat recalculation.') else: print(' - Run district heat recalculation.') cea.bigmacc.wesbrook_DH.main(config) if keys[7] == 1: print(' - PV use detected. 
Adding PV generation to demand files.') util.write_pv_to_demand(config) else: print(' - No PV use detected.') # running the emissions and costing calculations print(' - Run cost and emissions scripts.') cea.analysis.costs.system_costs.main(config) cea.analysis.lca.main.main(config) # clone out the simulation inputs and outputs directory print(' - Transferring results directory for experiment {}.'.format(i)) new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'inputs') new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data') if config.bigmacc.rerun != True: distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path) distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path) time_elapsed = time.perf_counter() - t0 # save log information log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i), 'Completed': 'True', 'Experiment Time': '%d.2 seconds' % time_elapsed, 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True) log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv')) log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", ) # write netcdf of hourly_results netcdf_writer.main(config, time='hourly') if config.bigmacc.rerun != True: shutil.rmtree(locator.get_costs_folder()) shutil.rmtree(locator.get_demand_results_folder()) shutil.rmtree(locator.get_lca_emissions_results_folder()) shutil.rmtree(locator.get_solar_radiation_folder()) shutil.rmtree(locator.get_potentials_folder()) else: print(' - Rerun does not require purging of the files.') # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here if keys[0] == 1: cea.datamanagement.data_initializer.main(config) else: pass print('END: experiment {}. \n'.format(i))
[ "def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. # Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n # Test single mass for now\n mass = 2e-15\n result = run_sim(mass, start, stop, sampling_rate)\n\n sim_name = 'sim_mass_{:g}_rate_{:g}.npz'.format(mass, sampling_rate)\n np.savez(sim_name, times=result[0], amplitudes=result[1])\n print('saved: {}'.format(sim_name))", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def main(starttime, hstart, hstop, cfg):\n\n if cfg.target is tools.Target.ICONOEM or cfg.target is tools.Target.ICONART:\n\n logging.info('ICON chemistry data for IC/BC')\n\n # Wait for meteo to finish first\n tools.check_job_completion(cfg.log_finished_dir,\"meteo\")\n\n tools.create_dir(cfg.icon_input_oae, \"online emissions input\")\n tools.create_dir(cfg.icon_input_icbc, \"icon_input_icbc\")\n tools.create_dir(cfg.icon_input_icbc_processed, \"icon_input_icbc_processed\")\n\n starttime_real = starttime + timedelta(hours = hstart)\n\n #-----------------------------------------------------\n # Remap chemistry initial conditions\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"ic_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"ic_chem\")\n\n # Write remap_chem namelist\n in_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc+'_dry.nc')\n in_grid_filename = in_filename\n out_grid_filename = os.path.join(cfg.input_root_grid,cfg.dynamics_grid_filename)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap_chem'])) as input_file:\n to_write = input_file.read()\n output_nml = os.path.join(cfg.icon_work, 'icontools_remap_chem_ic.namelist')\n with open(output_nml, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_filename=in_filename,\n out_filename=out_filename,\n in_grid_filename=in_grid_filename,\n out_grid_filename=out_grid_filename)\n outf.write(to_write)\n\n # Write remapfields namelist\n with 
open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_ic'])) as input_file:\n to_write = input_file.read()\n output_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_ic.namelist')\n with open(output_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n # Write run script (remap_ic.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_ic_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_ic.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_ic.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped initial conditions with icontools\")\n\n os.remove(output_nml)\n os.remove(output_fields)\n os.remove(output_run)\n\n # Transform initial data from dry to wet mixing ratios\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=out_filename,output='temp_file_01.nc')\n cdo.selvar(\"LNSP\",input=out_filename,output='temp_file_03.nc')\n os.remove(out_filename)\n # Rename variable to match ICON internal name with CDO:\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc)\n cdo.chname(\"CH4w\",\"CH4\",input='temp_file_01.nc',output='temp_file_02.nc')\n cdo.merge(input='temp_file_02.nc temp_file_03.nc',output=out_filename)\n\n os.remove('temp_file_01.nc')\n os.remove('temp_file_02.nc')\n os.remove('temp_file_03.nc')\n \n\n\n #-----------------------------------------------------\n # Remap chem LBC\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"lbc_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"lbc_chem\")\n\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_lbc'])) as input_file:\n to_write = input_file.read()\n output_nml_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_lbc.namelist')\n with open(output_nml_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n # Write remap_lbc namelist\n in_grid_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n in_filename = os.path.join(cfg.input_root_chem,time.strftime(cfg.chem_nameformat)+'.grb')\n out_grid_filename = os.path.join(cfg.icon_input_grid,cfg.lateral_boundary_grid)\n out_filename = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap'])) as input_file:\n to_write = input_file.read()\n output_nml_lbc = os.path.join(cfg.icon_work, 'icontools_remap_chem_lbc.namelist')\n with open(output_nml_lbc, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_grid_filename=in_grid_filename,\n in_filename=in_filename,\n out_grid_filename=out_grid_filename,\n out_filename=out_filename)\n outf.write(to_write)\n\n # Write run script (remap_chem_lbc.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_lbc_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_lbc.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n 
cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_lbc.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped boundary conditions at {} with icontools\".format(time))\n\n os.remove(output_nml_lbc)\n os.remove(output_run)\n\n os.remove(output_nml_fields)\n\n\n #-----------------------------------------------------\n # Merge chem files with meteo files using cdo\n #-----------------------------------------------------\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n chem_file = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n meteo_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n var_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_var.nc')\n transform_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_transform.nc')\n name_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_name.nc')\n processed_file = os.path.join(cfg.icon_input_icbc_processed, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n\n # Select variable with CDO\n cdo.selvar(\"CH4\",\"QV\",input=chem_file,output=var_file)\n # Transform to wet-mixing ratios with CDO\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=var_file,output=transform_file)\n # Rename variable to match ICON internal name with CDO:\n cdo.chname(\"CH4w\",\"oem_tracer_1\",input=transform_file,output=name_file)\n # Merge with CDO\n cdo.merge(input=name_file+' '+meteo_file,output=processed_file)\n\n # Delete temporary files\n os.remove(chem_file)\n os.remove(var_file)\n os.remove(transform_file)\n os.remove(name_file)\n\n logging.info(\"Merged chem variables to file {}\".format(meteo_file))\n\n\n\n # If COSMO (and not ICON):\n else:\n inv_to_process = []\n if cfg.target is tools.Target.COSMOGHG:\n try:\n CAMS = dict(fullname = \"CAMS\",\n nickname = \"cams\",\n executable = \"cams4int2cosmo\",\n indir = cfg.cams_dir_orig,\n outdir = cfg.cams_dir_proc,\n param = cfg.cams_parameters)\n inv_to_process.append(CAMS)\n except AttributeError:\n pass\n try:\n CT = dict(fullname = \"CarbonTracker\",\n nickname = \"ct\",\n executable = \"ctnoaa4int2cosmo\",\n indir = cfg.ct_dir_orig,\n outdir = cfg.ct_dir_proc,\n param = cfg.ct_parameters)\n inv_to_process.append(CT)\n except AttributeError:\n pass\n elif cfg.target is tools.Target.COSMOART:\n try:\n MOZART = dict(fullname = 'MOZART',\n nickname = 'mozart',\n executable = 'mozart2int2lm',\n indir = cfg.mozart_file_orig,\n outdir = cfg.mozart_dir_proc,\n param = [{'inc' : cfg.mozart_inc,\n 'suffix' : cfg.mozart_prefix}])\n inv_to_process.append(MOZART)\n except AttributeError:\n pass\n else:\n # Unknown target\n raise RuntimeError(\"Unknown target: {}\".format(cfg.target))\n\n # TO DO \n #MOZART = dict(fullname=\"MOZART\", nickname=\"mozart\",executable=\"cams4int2cosmo\")\n \n logging.info(\"Processing \" + \", \".join([i[\"fullname\"] for i in inv_to_process])+\" data\")\n\n scratch_path = os.path.join(cfg.int2lm_input,'icbc')\n tools.create_dir(scratch_path, \"icbc input\")\n\n for inv in inv_to_process:\n logging.info(inv[\"fullname\"]+\" files\")\n tools.create_dir(inv[\"outdir\"], \"processed \" + inv[\"fullname\"])\n #process_inv(starttime,hstart,hstop,increment,inv,cfg)\n \n for p in inv[\"param\"]:\n inc = p[\"inc\"]\n for time in 
tools.iter_hours(starttime, hstart, hstop, inc):\n logging.info(time)\n\n filename = os.path.join(inv[\"outdir\"],p[\"suffix\"]+\"_\"+time.strftime(\"%Y%m%d%H\")+\".nc\")\n if not os.path.exists(filename):\n logging.info(filename)\n try:\n to_call = getattr(tools, inv[\"executable\"])\n to_call.main(time,inv[\"indir\"],inv[\"outdir\"],p)\n except:\n logging.error(\"Preprocessing \"+inv[\"fullname\"] + \" data failed\")\n raise\n\n # copy to (temporary) run input directory\n tools.copy_file(filename, scratch_path)\n\n logging.info(\"OK\")", "def main():\n\n metadata = {}\n\n path_to_input_file = os.path.join('input', 'piano_and_synth_arp_chord_mono.wav')\n metadata['input_file'] = path_to_input_file\n signal = nussl.AudioSignal(path_to_input_file)\n\n # Set random seed in NMF and KMeans to 0\n params = {'num_sources': 2, 'num_templates': 6, 'distance_measure': nussl.transformers.TransformerNMF.EUCLIDEAN,\n 'num_iterations': 10, 'random_seed': 0}\n metadata['params'] = params\n\n nmf_mfcc = nussl.NMF_MFCC(signal, **params)\n\n if DEBUG:\n output_folder = os.path.join('tests', 'nmf_mfcc_reference', 'scratch')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n else:\n output_folder = os.path.join('tests', 'nmf_mfcc_reference', 'nmf_mfcc_benchmark_files')\n\n nmf_mfcc.run()\n np.save(os.path.join(output_folder, 'benchmark_labeled_templates'), nmf_mfcc.labeled_templates)\n np.save(os.path.join(output_folder, 'benchmark_masks'), nmf_mfcc.result_masks)\n\n nmf_mfcc.make_audio_signals()\n\n # Make sure the paths are empty\n for source in nmf_mfcc.sources:\n source.path_to_input_file = ''\n\n np.save(os.path.join(output_folder, 'benchmark_sources'), nmf_mfcc.sources)\n metadata['save_time'] = time.asctime()\n metadata['nussl_version'] = nussl.version\n metadata['made_by'] = platform.uname()[1]\n json.dump(metadata, open(os.path.join(output_folder, 'nmf_mfcc_benchmark_metadata.json'), 'w'))", "def main_loop(csd_profile, csd_seed, total_ele, num_init_srcs=1000):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n\n #TrueCSD\n t_csd_x, t_csd_y, t_csd_z, true_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=0., end_x=1., \n start_y=0., end_y=1., \n start_z=0., end_z=1.,\n res_x=100, res_y=100,\n res_z=100)\n\n #Electrodes\n ele_lims = [0.15, 0.85] #square grid, xy min,max limits\n ele_res = int(np.ceil(total_ele**(3**-1))) #resolution of electrode grid\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, t_csd_y, t_csd_z)\n ele_x = ele_pos[:, 0]\n ele_y = ele_pos[:, 1]\n ele_z = ele_pos[:, 2]\n \n #kCSD estimation\n gdX = 0.05\n gdY = 0.05\n gdZ = 0.05\n x_lims = [.0,1.] 
#CSD estimation place\n y_lims = [.0,1.]\n z_lims = [.0,1.]\n params = {'h':50., \n 'gdX': gdX, 'gdY': gdY, 'gdZ': gdZ,\n 'xmin': x_lims[0], 'xmax': x_lims[1], \n 'ymin': y_lims[0], 'ymax': y_lims[1],\n 'zmin': y_lims[0], 'zmax': y_lims[1],\n 'ext': 0.0, 'n_srcs_init': num_init_srcs}\n tic = time.time() #time it\n k, est_csd = do_kcsd(ele_pos, pots, h=50., \n gdx=gdX, gdy= gdY, gdz=gdZ,\n xmin=x_lims[0], xmax=x_lims[1], \n ymin=y_lims[0], ymax=y_lims[1],\n zmin=z_lims[0], zmax=z_lims[1],\n n_src_init=num_init_srcs, src_type='step')\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, chr_y, chr_z, test_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1],\n start_y=y_lims[0], end_y=y_lims[1],\n start_z=z_lims[0], end_z=z_lims[1],\n res_x=int((x_lims[1]-x_lims[0])/gdX), \n res_y=int((y_lims[1]-y_lims[0])/gdY),\n res_z=int((z_lims[1]-z_lims[0])/gdZ))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,:,:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title = str(k.lambd)+','+str(k.R)+', '+str(k.cv_error)+', '+str(rms)+', '+str(toc)\n save_as = csd_name+'_'+str(csd_seed)+'of'+str(total_ele)\n #save_as = csd_name+'_'+str(num_init_srcs)+'_'+str(total_ele)\n make_plots(title, \n chr_x, chr_y, chr_z, test_csd,\n ele_x, ele_y, ele_z, pots,\n k.estm_x, k.estm_y, k.estm_z, est_csd) \n #save\n result_kcsd = [k.lambd, k.R, k.cv_error, rms, toc]\n return est_csd, result_kcsd", "def carbon_workflow():\n # directory containing tiles: aboveground live woody biomass in 2000\n alwb_dir = \"F:/GFW_ALWBD_2000\"\n hansen_loss_dir = \"F:/Hansen_lossyear\"\n hansen_base_name = 'Hansen_GFC-2017-v1.5_lossyear_<loc_string>.tif'\n kba_shp = \"F:/Global_KBA_poly.shp\"\n\n # aligned rasters go in the temp_dir, but intermediate files go in\n # persistent directory in case there is an interruption\n intermediate_save_dir = \"F:/carbon_intermediate_files\"\n if not os.path.exists(intermediate_save_dir):\n os.makedirs(intermediate_save_dir)\n temp_dir = tempfile.mkdtemp()\n tile_list = [\n os.path.join(alwb_dir, f) for f in\n os.listdir(alwb_dir) if f.endswith('.tif')]\n\n summary_dict = {\n 'global_service_sum': [],\n 'global_service_sum_in_KBA': [],\n 'resolution': [],\n 'n_pixels': [],\n 'n_KBA_pixels': [],\n 'biomass_tile': [],\n }\n # native resolution\n combine_summary_results()\n done_df_csv = [\n f for f in os.listdir(intermediate_save_dir) if\n f.startswith('biomass_KBA_summary')]\n done_df = pandas.read_csv(\n os.path.join(intermediate_save_dir, done_df_csv[0]))\n done_df = done_df[done_df.n_pixels.notnull()]\n done_tiles = done_df[\n done_df['resolution'] == '30m']['biomass_tile'].values\n current_object = 1\n for biomass_tile_path in tile_list:\n print(\"processing tile {} of 280\".format(current_object))\n if os.path.basename(biomass_tile_path) in done_tiles:\n current_object += 1\n continue\n loc_string = os.path.basename(biomass_tile_path)[:8]\n loss_path = os.path.join(\n hansen_loss_dir,\n hansen_base_name.replace('<loc_string>', loc_string))\n kba_raster_path = os.path.join(temp_dir, 'kba.tif')\n pygeoprocessing.new_raster_from_base(\n biomass_tile_path, kba_raster_path, gdal.GDT_Int16,\n [_TARGET_NODATA], fill_value_list=[0])\n # create aligned KBA raster\n print(\"rasterizing aligned KBA raster\")\n pygeoprocessing.rasterize(kba_shp, kba_raster_path, burn_values=[1])\n # calculate zonal stats for tile\n print(\"calculating zonal stats for tile\")\n tile_dict = carbon_kba_summary(\n biomass_tile_path, loss_path, 
kba_raster_path)\n summary_dict['global_service_sum'].append(\n tile_dict['global_service_sum'])\n summary_dict['global_service_sum_in_KBA'].append(\n tile_dict['global_service_sum_in_KBA'])\n summary_dict['n_pixels'].append(tile_dict['n_pixels'])\n summary_dict['n_KBA_pixels'].append(tile_dict['n_KBA_pixels'])\n summary_dict['resolution'].append('30m')\n summary_dict['biomass_tile'].append(\n os.path.basename(biomass_tile_path))\n current_object += 1\n summary_df = pandas.DataFrame(data=summary_dict)\n csv_path = os.path.join(\n intermediate_save_dir, 'summary_dict_{}.csv'.format(\n current_object))\n summary_df.to_csv(csv_path, index=False)\n\n # 10 km resolution\n summary_dict = {\n 'global_service_sum': [],\n 'global_service_sum_in_KBA': [],\n 'resolution': [],\n 'n_pixels': [],\n 'n_KBA_pixels': [],\n 'biomass_tile': [],\n }\n kba_10km_path = \"C:/Users/ginge/Dropbox/KBA_ES/Global_KBA_10km.tif\"\n target_pixel_size = pygeoprocessing.get_raster_info(\n kba_10km_path)['pixel_size']\n # biomass masked and aggregated to ~10 km in R\n biomass_resampled_dir = \"C:/Users/ginge/Desktop/biomass_working_dir/GFW_ALWBD_2015_10km_resample\"\n aligned_dir = os.path.join(temp_dir, 'aligned')\n os.makedirs(aligned_dir)\n\n tile_basename_list = [\n f for f in os.listdir(biomass_resampled_dir) if f.endswith('.tif')]\n current_object = 1\n done_tiles = done_df[\n done_df['resolution'] == '10km']['biomass_tile'].values\n for biomass_tile_bn in tile_basename_list:\n if biomass_tile_bn in done_tiles:\n continue\n native_path = os.path.join(alwb_dir, biomass_tile_bn)\n resampled_path = os.path.join(biomass_resampled_dir, biomass_tile_bn)\n input_path_list = [native_path, resampled_path, kba_10km_path]\n\n aligned_native_path = os.path.join(aligned_dir, 'native_biomass.tif')\n aligned_resampled_path = os.path.join(aligned_dir, biomass_tile_bn)\n aligned_kba_path = os.path.join(aligned_dir, 'kba.tif')\n target_raster_path_list = [\n aligned_native_path, aligned_resampled_path, aligned_kba_path]\n\n print(\"processing 10km tile {} of {}\".format(\n current_object, len(tile_basename_list)))\n align_bounding_box = pygeoprocessing.get_raster_info(\n native_path)['bounding_box']\n pygeoprocessing.align_and_resize_raster_stack(\n input_path_list, target_raster_path_list,\n ['near'] * len(input_path_list), target_pixel_size,\n bounding_box_mode=align_bounding_box)\n\n tile_dict = carbon_kba_summary_no_loss(\n aligned_resampled_path, aligned_kba_path)\n summary_dict['global_service_sum'].append(\n tile_dict['global_service_sum'])\n summary_dict['global_service_sum_in_KBA'].append(\n tile_dict['global_service_sum_in_KBA'])\n summary_dict['n_pixels'].append(tile_dict['n_pixels'])\n summary_dict['n_KBA_pixels'].append(tile_dict['n_KBA_pixels'])\n summary_dict['resolution'].append('10km')\n summary_dict['biomass_tile'].append(biomass_tile_bn)\n current_object += 1\n # if current_object % 10 == 0:\n summary_df = pandas.DataFrame(data=summary_dict)\n csv_path = os.path.join(\n intermediate_save_dir, 'summary_dict_{}.csv'.format(\n current_object))\n summary_df.to_csv(csv_path, index=False)\n\n combine_summary_results()", "def run_process(hrc):\n#\n#--- set conditions for either hrc-i or hrc s\n#\n if hrc == 'hrc_i':\n out_list = 'hrc_i_list'\n data_dir = '/data/hrc/i/'\n inst = 'i'\n else:\n out_list = 'hrc_s_list'\n data_dir = '/data/hrc/s/'\n inst = 's'\n#\n#--- make a list of obsids\n#\n cmd = 'ls -d ' + data_dir + '* > ' + zspace\n os.system(cmd)\n data = mcf.read_data_file(zspace, remove=1)\n hlist = []\n for ent in 
data:\n atemp = re.split('\\/', ent)\n obsid = atemp[-1]\n if mcf.is_neumeric(obsid):\n hlist.append(obsid)\n\n# if hrc == 'hrc_i':\n# print(\"HRC I : \" + str(hlist))\n# else:\n# print(\"HRC S : \" + str(hlist))\n# \n for obsid in hlist:\n obsid = str(int(float(obsid)))\n\n with open(out_list, 'w') as fo:\n fo.write(str(obsid) + '\\n')\n cmd = 'rm -rf ' + data_dir + obsid + \"analysis/*\"\n os.system(cmd)\n#\n#--- extract fits data needed for analysis\n#\n chk = extract_hrc_data(obsid, data_dir)\n if chk == False:\n print(\"Not all data are available\")\n continue\n\n if hrc == 'hrc_i':\n cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh hrc_i_list'\n else:\n cmd = 'csh -f ' + bin_dir + 'repro_all_S_new.csh hrc_s_list'\n\n try:\n run_ciao(cmd)\n cdir = data_dir + '/' + str(obsid)\n if os.path.isdir(cdir):\n cmd = 'chgrp -R hat ' + cdir \n os.system(cmd)\n cmd = 'chmod -R 775 ' + cdir \n os.system(cmd)\n#\n#--- directory name should be 5 digit\n#\n test = int(float(obsid))\n if test < 10000:\n chk = mcf.add_leading_zero(obsid, 5)\n odir = data_dir + '/' + str(chk)\n if os.path.isdir(odir):\n cmd = 'rm -rf ' + odir\n os.system(cmd)\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n else:\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n except:\n pass\n\n mcf.rm_files(out_list)\n correct_naming(obsid, inst)\n\n #chk_proccess_status(inst, hlist)", "def test_process_CHiC():\n\n path = os.path.join(os.path.dirname(__file__), \"data/\")\n\n input_files = {\n \"fastq1\" : path + \"test_truncater/SRR3535023_1.fastq\",\n \"fastq2\" : path + \"test_truncater/SRR3535023_2.fastq\",\n \"genome_fa\" : path + \"test_baitmap/chr21_hg19.fa\",\n \"genome_idx\" : path + \"test_baitmap/bwa.tar.gz\",\n \"probes_fa\" : path + \"test_baitmap/baits.fa\",\n \"Rtree_file_dat\" : path + \"test_rmap/rtree_file.dat\",\n \"Rtree_file_idx\" : path + \"test_rmap/rtree_file.idx\",\n \"chr_handler\" : path + \"test_baitmap/chr_handler.txt\",\n \"bowtie_gen_idx\" : path + \"test_baitmap/chr21_hg19.fa.bt2.tar.gz\",\n \"RMAP\" : path + \"test_run_chicago/test.rmap\",\n \"BAITMAP\": path + \"test_run_chicago/test.baitmap\",\n \"hicup_outdir_tar\" : path + \"test_hicup/output.tar\",\n \"chinput\": [\n path + \"test_run_chicago/data_chicago/GM_rep1.chinput\",\n path + \"test_run_chicago/data_chicago/GM_rep2.chinput\"\n ],\n \"setting_file\" : path + \"test_run_chicago/data_chicago/sGM12878.settingsFile\",\n \"rmap_chicago\" : path + \"test_run_chicago/data_chicago/h19_chr20and21.rmap\",\n \"baitmap_chicago\" : path + \"test_run_chicago/data_chicago/h19_chr20and21.baitmap\",\n \"nbpb_chicago\" : path + \"test_run_chicago/data_chicago/h19_chr20and21.nbpb\",\n \"poe_chicago\" : path + \"test_run_chicago/data_chicago/h19_chr20and21.poe\",\n \"npb_chicago\" : path + \"test_run_chicago/data_chicago/h19_chr20and21.npb\",\n\n }\n\n\n output_files = {\n \"RMAP\" : path + \"test_run_chicago/test.rmap\",\n \"Rtree_file_dat\" : path + \"test_rmap/rtree_file.dat\",\n \"Rtree_file_idx\" : path + \"test_rmap/rtree_file.idx\",\n \"bait_sam\" : path + \"test_baitmap/baits.sam\",\n \"out_bam\" : path + \"test_baitmap/baits.bam\",\n \"out_baitmap\" : path + \"test_run_chicago/test.baitmap\",\n \"hicup_outdir_tar\" : path + \"test_hicup/output.tar\",\n \"nbpb\" : path + \"test_run_chicago/test.nbpb\",\n \"npb\" : path + \"test_run_chicago/test.npb\",\n \"poe\" : path + \"test_run_chicago/test.poe\",\n \"chinput\" : path + \"test_bam2chicago_tool/output_chinput.chinput\",\n \"output\": path + 
\"test_run_chicago/data_chicago/out_run_chicago.tar\",\n \"chr_handler\" : path + \"test_baitmap/chr_handler.txt\"\n }\n\n configuration = {\n \"renzime\" : {\"HindIII\" : 'A|AGCTT'},\n \"hicup_renzyme\" : \"A^AGCTT,HindIII\",\n \"genome_name\" : \"test_hg19\",\n \"hicup_bowtie2_loc\": \"/usr/bin/bowtie2\",\n \"hicup_longest\": \"800\",\n \"hicup_shortest\": \"150\",\n \"hicup_outdir\": path + \"test_hicup/output\",\n \"hicup_zip\": \"True\",\n \"makeDesignFiles_minFragLen\" : \"150\",\n \"makeDesignFiles_maxFragLen\" : \"40000\",\n \"makeDesignFiles_maxLBrownEst\" : \"1500000\",\n \"makeDesignFiles_binsize\" : \"20000\",\n \"makeDesignFiles_removeb2b\" : True,\n \"makeDesignFiles_removeAdjacent\" : True,\n \"makeDesignFiles_outfilePrefix\" : path + \"test_run_chicago/test\",\n #\"makeDesignFiles_designDir\" : path + \"test_run_chicago\",\n \"makeDesignFiles_rmap\" : path + \"test_run_chicago/test.rmap\",\n \"makeDesignFiles_baitmap\" : path + \"test_run_chicago/test.baitmap\",\n \"chicago_design_dir\": path + \"/test_run_chicago/data_chicago\",\n \"chicago_print_memory\": \"None\",\n \"chicago_out_prefix\" : \"output_test\",\n \"chicago_cutoff\": \"5\",\n \"chicago_export_format\": \"washU_text\",\n \"chicago_export_order\": \"None\",\n \"chicago_rda\": \"None\",\n \"chicago_save_df_only\": \"None\",\n \"chicago_examples_prox_dist\": \"1e6\",\n \"chicago_examples_full_range\": \"None\",\n \"chicago_en_feat_files\": \"None\",\n \"chicago_en_min_dist\": \"0\",\n \"chicago_en_max_dist\": \"1e6\",\n \"chicago_en_full_cis_range\": \"None\",\n \"chicago_en_sample_no\": \"100\",\n \"chicago_en_trans\": \"None\",\n \"chicago_features_only\": \"None\"\n }\n\n input_metadata = {\n \"fastq1\": Metadata(\n data_type=\"text\",\n file_type=\"fastq\",\n file_path=input_files[\"fastq1\"],\n sources=\"\",\n taxon_id=9606,\n\n meta_data=\"\"\n ),\n \"fastq2\": Metadata(\n data_type=\"text\",\n file_type=\"fastq\",\n file_path=input_files[\"fastq2\"],\n sources=\"\",\n taxon_id=9606,\n meta_data=\"\"\n ),\n \"genome_fa\" : Metadata(\n data_type=\"text\",\n file_type=\"fasta\",\n file_path=input_files[\"genome_fa\"],\n sources=\"\",\n taxon_id=9606,\n meta_data=\"GRCh38\",\n ),\n \"genome_idx\" : Metadata(\n \"index_bwa\", \"\", input_files[\"genome_fa\"],\n {\n \"assembly\": \"test\",\n \"tool\": \"bwa_indexer\"\n }\n ),\n \"probes_fa\" : Metadata(\n \"C-HiC probes\", \"fasta\", path + \"test_baitmap/baits.fa\",\n None, None, 9606),\n\n \"Rtree_file_dat\" : Metadata(\n \"Rtree files\", [\".dat\", \".idx\"], path + \"test_rmap/rtree_file\",\n {\"genome\" : path + \"test_rmap/chr21_hg19.fa\",\n \"RE\" : {\"HindIII\" : 'A|AGCTT'}},\n None, 9606\n ),\n\n \"Rtree_file_idx\" : Metadata(\n\n \"Rtree files\", [\".dat\", \".idx\"], path + \"test_rmap/rtree_file\",\n {\"genome\" : path + \"test_rmap/chr21_hg19.fa\",\n \"RE\" : {\"HindIII\" : 'A|AGCTT'}},\n None, 9606\n ),\n \"RMAP\" : Metadata(\n \"data_chicago_input\", \".rmap\",\n path + \"test_run_chicago\", None, {}, 9606),\n \"BAITMAP\" : Metadata(\n \"data_chicago_input\", \".baitmap\",\n path + \"test_run_chicago\", None, {}, 9606),\n\n \"hicup_outdir_tar\" : Metadata(\n \"TAR\", \"CHiC_data\", path + \"/SRR3535023_1_2.hicup.bam\",\n {\"fastq1\" : \"SRR3535023_1.fastq\",\n \"fastq2\" : \"SRR3535023_2.fastq\", \"genome\" : \"human_hg19\"},\n 9606),\n \"chinput\" : Metadata(\n \"data_chicago\", \"chinput\", [], None, None, 9606)\n }\n\n chic_hdl = process_CHiC(configuration)\n chic_hdl.run(input_files, input_metadata, output_files)\n\n #assert makeRmap_Tool.py\n 
assert os.path.getsize(output_files[\"Rtree_file_dat\"]) > 0\n assert os.path.getsize(output_files[\"Rtree_file_idx\"]) > 0\n\n #assert makeBaitmap.py\n assert os.path.getsize(output_files[\"out_bam\"]) > 0\n assert os.path.getsize(output_files[\"out_baitmap\"]) > 0\n\n #assert hicup\n assert os.path.isfile(output_files[\"hicup_outdir_tar\"]) is True\n assert os.path.getsize(output_files[\"hicup_outdir_tar\"]) > 0\n\n assert os.path.isfile(path + \"test_run_chicago/test\" + \".nbpb\") is True\n assert os.path.getsize(path + \"test_run_chicago/test\" + \".nbpb\") > 0\n\n assert os.path.isfile(path + \"test_run_chicago/test\" + \".npb\") is True\n assert os.path.getsize(path + \"test_run_chicago/test\" + \".npb\") > 0\n\n assert os.path.isfile(path + \"test_run_chicago/test\" + \".poe\") is True\n assert os.path.getsize(path + \"test_run_chicago/test\" + \".poe\") > 0\n\n assert os.path.isfile(output_files[\"chinput\"]) is True\n assert os.path.getsize(output_files[\"chinput\"]) > 0\n\n assert os.path.isfile(output_files[\"output\"]) is True\n\n assert os.path.getsize(output_files[\"output\"]) > 0", "def main():\n\n # Read control file and assign values\n import sys\n control_file = None if len(sys.argv) != 2 else sys.argv[1]\n if control_file is not None:\n control = cfg.read_yaml(control_file)\n logging.info(\"Control File: \" + str(control_file) + \" successfully read\")\n else:\n raise RuntimeError(\"A control file must be provided for camps_metar_to_nc. Exiting program.\")\n\n # Read contents of control file\n date_range = control['date_range']\n input_data = control['input_data']\n output = control['output']\n debug_level = control['debug_level']\n log_file = control['log_file']\n err_file = control['err_file']\n station_list = control['station_list']\n station_table = control['station_table']\n qc_flag = control['qc_flag']\n pickle = control['pickle']\n\n num_procs = control['num_processors']\n os.environ['NUM_PROCS'] = str(num_procs)\n num_procs = check_num_procs(num_procs)\n\n if log_file:\n out_log = open(log_file, 'w+')\n sys.stdout = out_log\n sys.stderr = out_log\n\n try:\n logging.getLogger('').handlers = []\n level = logging.getLevelName(debug_level)\n logging.basicConfig(level=level)\n except:\n print(\"Logging setup failed\")\n raise\n\n logging.info(\"Starting main\")\n\n dates = util.generate_date_range(date_range)\n stn_lst = util.read_station_list(station_list)\n stn_lst,stn_tbl = util.read_station_table(station_table,stn_lst)\n\n # This will read the CSV files and put them into stations\n reader = read_obs(input_data,dates,stn_tbl,stn_lst,qc_flag)\n\n # Convert all arrays to numpy arrays\n logging.info(\"Converting station arrays to numpy arrays\")\n stations = reader.station_list\n stations = convert_to_numpy(stations)\n fix_rounding_errors(stations)\n\n # Optionally pickle and save\n if pickle:\n logging.info(\"Pickling\")\n save_object(stations, 'stations.pkl')\n\n # Check if QC is to be performed\n if control.qc_flag:\n stations = qc_main.qc(stations,err_file)\n # Take off the start and end times from the arrays\n remove_time_buffer(stations)\n\n # Sort stations by station name\n stations = OrderedDict(sorted(stations.items()))\n\n # Create Output filename\n filename = output\n\n dimensions = cfg.read_dimensions()\n num_stations = dimensions['nstations']\n time_dim = dimensions['time']\n\n # Format each observation into a 2D array with\n # dimensions of # of stations and # of times\n if pickle:\n logging.info(\"Pickling\")\n save_object(stations, 'postqc.pkl')\n 
logging.info(\"Construct 2D observation arrays\")\n camps_data = []\n example_station = list(stations.values())[0]\n obs = list(example_station.observations.keys())\n obs.remove('TIME')\n start_time = dates[0]\n end_time = dates[-1]\n logging.info(\"start time: \" + start_time)\n logging.info(\"end time: \" + end_time)\n\n met_to_nc = cfg.read_metar_nc_lookup()\n for metar_name in obs:\n # Set the observation name to the standard CAMPS name\n try:\n observation_name = met_to_nc[metar_name]\n except:\n logging.error(\"Cannot find the netcdf equivalent of \" +\n metar_name +\n \"in METAR lookup table. Skipping.\")\n continue\n\n # Loop through the stations and stitch together the current observation\n temp_obs = []\n for station_name, cur_station in stations.items():\n if 'latitude' in observation_name:\n temp_obs.append(list(np.repeat(stn_tbl[station_name]['lat'],len(dates))))\n elif 'longitude' in observation_name:\n temp_obs.append(list(np.repeat(stn_tbl[station_name]['lon'],len(dates))))\n else:\n temp_obs.append(cur_station.get_obs(metar_name))\n obs_data = np.array(temp_obs)\n logging.info(observation_name)\n\n # Construct Camps data object\n camps_obj = Camps_data(observation_name)\n try:\n camps_obj.metadata['vertical_coord'] = camps_obj.metadata.pop('coordinates')\n except:\n pass\n if camps_obj.is_feature_of_interest() and len(obs_data.shape)>1:\n obs_data = obs_data[:,0]\n camps_obj.set_dimensions((num_stations,))\n else:\n camps_obj.set_dimensions((time_dim,num_stations))\n camps_obj.add_data(obs_data)\n camps_obj.add_source('METAR')\n camps_obj.add_process('DecodeBUFR')\n if qc_flag: camps_obj.add_process('METARQC')\n camps_obj.change_data_type()\n\n # Again check for time bounds, pass extra info to add_time if\n # there are time bounds\n if not camps_obj.is_feature_of_interest():\n if camps_obj.has_time_bounds():\n hours = camps_obj.properties['hours']\n camps_obj.metadata['hours'] = hours\n camps_obj.time = add_time(start_time, end_time, time_bounds=hours)\n else:\n camps_obj.time = add_time(start_time, end_time)\n\n # Transpose the array and swap dimension names. Note that this may be a\n # temporary solution.\n if len(camps_obj.data.shape)>1:\n camps_obj.data = np.transpose(camps_obj.data)\n\n camps_data.append(camps_obj)\n\n camps_obj = pack_station_names(list(stations.keys()))\n camps_obj.add_source('METAR')\n camps_data.append(camps_obj)\n\n if qc_flag:\n extra_globals = {\"source\": \"Data from METAR with MDL Quality Control\"}\n else:\n extra_globals = {\"source\": \"Data from METAR (No MDL Quality Control)\"}\n\n # TEMPORARY: Need to perform 2 actions here. We should do this elsewhere, but here for now...\n #\n # 1) Unscale precip obs. Precip obs in MDL hourly table are units of hundreths of inches\n # (i.e. 1.00 inches is 100).\n # 2) Trace amounts in the MDL hourly table are coded as -4. 
Here we need to set these\n # to a \"defined\" trace amount as float of value 0.004.\n for c in camps_data:\n if \"precipitation_amount\" in c.standard_name:\n c.data = np.where(np.logical_and(c.data>=0.0,c.data<9999.),c.data/100.0,c.data)\n c.data = np.where(c.data==-4,np.float32(0.004),c.data)\n # TEMPORARY\n\n # Write into netCDF4 file\n writer.write(camps_data, filename, extra_globals)\n if log_file:\n out_log.close()", "def generate_megafile():\n\n print(\"\\nFetching testing dataset…\")\n testing = get_testing()\n\n print(\"\\nFetching ECDC dataset…\")\n ecdc = get_ecdc()\n\n location_mismatch = set(testing.location).difference(set(ecdc.location))\n for loc in location_mismatch:\n print(f\"<!> Location '{loc}' has testing data but is absent from ECDC data\")\n\n print(\"\\nFetching OxCGRT dataset…\")\n cgrt = get_cgrt()\n\n all_covid = (\n ecdc\n .merge(testing, on=[\"date\", \"location\"], how=\"outer\")\n .merge(cgrt, on=[\"date\", \"location\"], how=\"left\")\n .sort_values([\"location\", \"date\"])\n )\n\n # Add ISO codes\n print(\"Adding ISO codes…\")\n iso_codes = pd.read_csv(os.path.join(INPUT_DIR, \"iso/iso3166_1_alpha_3_codes.csv\"))\n\n missing_iso = set(all_covid.location).difference(set(iso_codes.location))\n if len(missing_iso) > 0:\n print(missing_iso)\n raise Exception(\"Missing ISO code for some locations\")\n\n all_covid = iso_codes.merge(all_covid, on=\"location\")\n\n # Add continents\n print(\"Adding continents…\")\n continents = pd.read_csv(\n os.path.join(INPUT_DIR, \"owid/continents.csv\"),\n names=[\"_1\", \"iso_code\", \"_2\", \"continent\"],\n usecols=[\"iso_code\", \"continent\"],\n header=0\n )\n\n all_covid = continents.merge(all_covid, on=\"iso_code\", how=\"right\")\n\n # Add macro variables\n # - the key is the name of the variable of interest\n # - the value is the path to the corresponding file\n macro_variables = {\n \"population\": \"un/population_2020.csv\",\n \"population_density\": \"wb/population_density.csv\",\n \"median_age\": \"un/median_age.csv\",\n \"aged_65_older\": \"wb/aged_65_older.csv\",\n \"aged_70_older\": \"un/aged_70_older.csv\",\n \"gdp_per_capita\": \"wb/gdp_per_capita.csv\",\n \"extreme_poverty\": \"wb/extreme_poverty.csv\",\n \"cardiovasc_death_rate\": \"gbd/cardiovasc_death_rate.csv\",\n \"diabetes_prevalence\": \"wb/diabetes_prevalence.csv\",\n \"female_smokers\": \"wb/female_smokers.csv\",\n \"male_smokers\": \"wb/male_smokers.csv\",\n \"handwashing_facilities\": \"un/handwashing_facilities.csv\",\n \"hospital_beds_per_thousand\": \"owid/hospital_beds.csv\",\n \"life_expectancy\": \"owid/life_expectancy.csv\",\n \"human_development_index\": \"un/human_development_index.csv\",\n }\n all_covid = add_macro_variables(all_covid, macro_variables)\n\n print(\"Writing to CSV…\")\n all_covid.to_csv(os.path.join(DATA_DIR, \"owid-covid-data.csv\"), index=False)\n\n print(\"Writing to XLSX…\")\n all_covid.to_excel(os.path.join(DATA_DIR, \"owid-covid-data.xlsx\"), index=False)\n\n print(\"Writing to JSON…\")\n df_to_json(all_covid, os.path.join(DATA_DIR, \"owid-covid-data.json\"), macro_variables.keys())\n\n # Store the last updated time\n timestamp_filename = os.path.join(DATA_DIR, \"owid-covid-data-last-updated-timestamp.txt\")\n with open(timestamp_filename, \"w\") as timestamp_file:\n timestamp_file.write(datetime.utcnow().replace(microsecond=0).isoformat())\n\n print(\"All done!\")", "def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for 
first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)", "def main():\n parser = argparse.ArgumentParser(description='Generates Censys jobs')\n\n parser.add_argument('--home', dest='home',\n help='homedir')\n\n parser.add_argument('--data', dest='data',\n help='data dir for the job')\n\n parser.add_argument('--jobs-dir', dest='jobsdir', default='.',\n help='dir to put jobs to')\n\n parser.add_argument('--wrapper', dest='wrapper',\n help='python script wrapper')\n\n parser.add_argument('file', nargs=argparse.ZERO_OR_MORE, default=[],\n help='censys link file')\n\n args = parser.parse_args()\n\n # Process the input\n if len(args.file) == 0:\n print('Error; no input given')\n sys.exit(1)\n\n if not os.path.exists(args.jobsdir):\n utils.make_or_verify_dir(args.jobsdir)\n\n dataset_idx = 10\n datasets = []\n\n for file_name in args.file:\n logger.info('Processing %s' % file_name)\n\n code = 'fullipv4'\n if 'alexa' in file_name:\n code = 'alexa'\n\n logdir = os.path.join(args.home, 'logs')\n if not os.path.exists(logdir):\n os.makedirs(logdir, 0o775)\n\n with open(file_name, 'r') as fh:\n js = json.load(fh)\n for dataset in js['data']:\n id = dataset['id']\n log_file = os.path.abspath(os.path.join(logdir, '%s_%s_%03d.log' % (os.getpid(), code, int(id))))\n\n job = '#!/bin/bash\\n'\n job += 'cd %s\\n' % args.home\n job += 'stdbuf -eL %s --debug --link-file \"%s\" --link-idx %d --data \"%s\" --continue --sec 2> \"%s\" \\n' \\\n % (os.path.abspath(args.wrapper),\n os.path.abspath(file_name), id, args.data, log_file)\n\n jobfile_path = os.path.join(args.jobsdir, '%s-%05d.sh' % (code, id))\n with open(jobfile_path, 'w') as jh:\n jh.write(job)", "def run(goal, time, control_alg, cool_down):\n global x_old\n global current_DC\n global numloops\n global delta_t\n start_time = timey.time()\n timeforfile = datetime.now()\n timeforfile = timeforfile.strftime(\"%Y-%m-%d - 
%H:%M\") \n filename = timeforfile + str(control_alg.__name__) + '.csv'\n filename = filename.replace(' ', '')\n \n with open(filename, 'a', newline = '') as file:\n pass\n\n current_DC = 0\n\n while(True):\n loop_start = timey.time()\n datarow = [0] \n current_time=abs(start_time-timey.time())\n datarow[0] = round(current_time,2)\n\t\n for i in range(len(rtd.RTD_list)): #recording new temperature values of the RTDs \n if i!=2 and i!=4: #channel 2 on Rev1.2 is not functional. Channel 4 is being used for the cold plate. Ignoring both \n datarow.append(rtd.get_temp(i))\n\t\n \n #printing out values every two seconds\n if numloops%4 == 0: \n print('t = ',datarow[0])\n ## #.format(*RTD_val)) #0-1023 value from MCP3008\n print('|t={0:^5}|{1:^7}|{2:^7}|{3:^7}|{4:^7}|{5:^7}|{6:^7}'.format(*datarow)) #temperature calculated from raw data.\n\n\n \n #Calculating the average temperature of the board\n sum_temps = 0.0\n for i in range(1,len(datarow)):\n sum_temps += datarow[i] \n average_temp = sum_temps/6.0 #divide by number of RTDs on board\n datarow.append(average_temp)\n if numloops==0:\n x_old=average_temp #to make the filter work\n filtered_temp = laplacefilter(average_temp)\n datarow.append(filtered_temp)\n print('Filtered temp: ', filtered_temp)\n print('Average temp: ', average_temp)\n plate_temp = rtd.get_temp(4) #currently the cold plate RTD is connected to channel 4\n print('Cold plate temp: ', plate_temp)\n datarow.append(plate_temp)\n stop_heat=False\n if current_time>=time:\n stop_heat=True\n if not stop_heat:\n if abs(filtered_temp-average_temp)<.5: #checking if filter has caught up to actual data\n current_DC=control_alg(filtered_temp, goal)\n else: \n current_DC=control_alg(average_temp, goal)\n\n elif cool_down and stop_heat:\n heater.ChangeDutyCycle(0)\n current_DC=0 \n if current_time>=time+1200: #cooldown is set to 30 seconds\n raise KeyboardInterrupt('cool_down complete')\n else:\n raise KeyboardInterrupt('heat test done')\n print('Current DC:', current_DC)\n \n datarow.append(current_DC)\n\n with open(filename, 'a', newline = '') as file:\n dat = csv.writer(file)\n dat.writerow(datarow)\n \n print(\"_\"*72)\n print(\"\") \n \n loop_end = timey.time()\n sleep(delta_t - (loop_end - loop_start)) # Sleeps for exactly delta_t minus code runtime\n numloops += 1", "def main():\n # if the log directory is not exitsted, create it first.\n global dump_log\n d = os.path.dirname(dump_logfile)\n if not os.path.exists(d):\n os.makedirs(d)\n\n transfer_output_to_server(LOG_SERVER, LOG_USER, KEY_FILE, OUTPUT_DIR, TARGET)\n\n excluded_tables = dict()\n\n for ds in DATA_SOURCES:\n excluded_tables[ds] = []\n\n if EXCLUDED_TABLES:\n dbt_list = EXCLUDED_TABLES.split(',')\n for dbt in dbt_list:\n t_list = dbt.split('-')\n if t_list[0] in excluded_tables.keys():\n excluded_tables[t_list[0]].append(t_list[1])\n\n table_list = dict()\n prev_data_sources = dict()\n for ds in DATA_SOURCES:\n tables = get_tables(ds)\n table_list[ds] = tables\n table_dict = dict()\n for table in tables:\n table_dict[table] = None\n prev_data_sources[ds] = table_dict\n\n for i in range(0, COUNT):\n\n dump_log = open(dump_logfile,'a')\n log(\"Trial %s/%s\" % (i+1, COUNT))\n dump_datasources(prev_data_sources, table_list, excluded_tables)\n dump_log.close()\n\n time.sleep(FREQUENCY)", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = 
['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def run_create_job_files():\n capture_efficiency = [i/20 for i in range(21)] \n histidine_init_conc = [i/10 for i in range(14)] # Saturating concentration is 1.2. 
We go up to 1.3\n SUCRe_atp = [i/2 for i in range(51)] \n\n # Total combinations for simulating histidine\n total_comb_his = len(capture_efficiency)*len(histidine_uptake_rate)\n total_comb_atp = len(capture_efficiency)*len(SUCRe_atp)\n interval_size_his = 100\n interval_size_atp = 100\n print '\\ntotal_comb_his = {} , interval_size_his = {} total_comb_atp = {} , interval_size_atp = {}\\n'.format(total_comb_his,interval_size_his,total_comb_atp,interval_size_atp)\n \n # his, FixedO2\n print '\\n--- Creating job files for his, fixedo2 ---'\n create_job_files(total_comb_num = total_comb_his, interval_size = interval_size_his, job_filename_base = 'jobs/job_dyn_his_fixedo2', joboutput_filename_base = 'job_dyn_his_fixedo2', results_filename_base = 'results/game_results_dyn_his_fixedo2', simulate_his = True, fixed_o2 = True, max_walltime = 72)\n \n # his, variable o2\n print '\\n--- Creating job files for his, variable O2 ---'\n create_job_files(total_comb_num = total_comb_his, interval_size = interval_size_his, job_filename_base = 'jobs/job_dyn_his_variableO2', joboutput_filename_base = 'job_dyn_his_variableO2', results_filename_base = 'results/game_results_dyn_his_variableO2', simulate_his = True, fixed_o2 = False, max_walltime = 72)\n \n # atp, FixedO2\n print '\\n--- Creating job files for atp, fixedo2 ---'\n create_job_files(total_comb_num = total_comb_atp, interval_size = interval_size_atp, job_filename_base = 'jobs/job_dyn_atp_fixedo2', joboutput_filename_base = 'job_dyn_atp_fixedo2', results_filename_base = 'results/game_results_dyn_atp_fixedo2', simulate_his = False, fixed_o2 = True, max_walltime = 72)\n \n # atp, variable O2\n print '\\n--- Creating job files for atp, variableO2 ---'\n create_job_files(total_comb_num = total_comb_atp, interval_size = interval_size_atp, job_filename_base = 'jobs/job_dyn_atp_variableO2', joboutput_filename_base = 'job_dyn_atp_variableO2', results_filename_base = 'results/game_results_dyn_atp_variableO2', simulate_his = False, fixed_o2 = False, max_walltime = 72)", "def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n 
for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! 
-d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. Use hard_overwrite = True to overwrite.\")\n return shlist", "def update_compdatabase():\n for comp_group in comp_entry:\n#\n#--- read the last set of the input data and find the last entry \n#\n past = house_keeping + comp_group + '_past'\n past = mcf.read_data_file(past)\n\n last = past[-1]\n#\n#--- find today's data entry\n#\n cmd = 'ls /data/mta_www/mp_reports/*/' + comp_group + '/data/mta*fits* >' + zspace\n os.system(cmd)\n current = mcf.read_data_file(zspace)\n\n cmd = 'mv '+ zspace + ' ' + house_keeping + comp_group + '_past'\n os.system(cmd)\n#\n#--- find the data which are not read\n#\n new_fits = []\n chk = 0\n for ent in current:\n if chk == 0:\n if ent == last:\n chk = 1\n continue\n new_fits.append(ent)\n#\n#--- uppend the data to the local fits data files\n#\n for fits in new_fits:\n [cols, tbdata] = ecf.read_fits_file(fits)\n\n time = tbdata['time']\n\n for col in cols:\n#\n#--- ignore columns with \"ST_\" (standard dev) and time\n#\n if col.lower() == 'time':\n continue\n\n mc = re.search('st_', col.lower())\n if mc is not None:\n continue\n\n mdata = tbdata[col]\n cdata = [time, mdata]\n ocols = ['time', col.lower()]\n\n ofits = out_dir + col.lower()+ '_full_data.fits'\n if os.path.isfile(ofits):\n update_fits_file(ofits, ocols, cdata)\n else:\n create_fits_file(ofits, ocols, cdata)", "def run_all(self):\n w_df_stock_code = self.all_df_stock_code(ktype='w')\n df_w_p_macd = []\n df_kdj = []\n\n # 判断周线的MACD是红的\n ktype = 'w'\n for code in w_df_stock_code:\n df = pd.read_csv(self.data_dir + '/stock_%s_csv/%s.csv' % (ktype, code), index_col=['date'])\n\n # 特征处理\n df = df.sort_index() # 要正序才能计算KDJ\n df = self.SEE.macd(df)\n df = self.SEE.p_macd(df)\n df = df.sort_index(ascending=False) # 倒序\n if not df.empty and df.p_macd[0] == 1:\n df_w_p_macd.append(code)\n task_rate = round(((w_df_stock_code.index(code) + 1) / len(w_df_stock_code)) * 100, 2)\n print('\\r run_all_macd %s task: ' % ktype + str(task_rate) + '%', end='', flush=True)\n # 判断日线的p_kdj_macd\n print(len(df_w_p_macd))\n ktype = 'd'\n for code in df_w_p_macd:\n df = pd.read_csv(self.data_dir + '/stock_%s_csv/%s.csv' % (ktype, code), index_col=['date'])\n # =========================================================================================================\n # 特征处理\n df = df.sort_index() 
# 要正序才能计算KDJ\n df = self.SEE.kdj(df)\n df = self.SEE.kdj_macd(df)\n df = self.SEE.p_kdj_macd(df)\n df = df.sort_index(ascending=False) # 倒序\n # =========================================================================================================\n if not df.empty and -0.5 <= df.p_kdj_macd_d[0] <= 0 and df.kdj_k[0] < df.kdj_d[0]:\n df_kdj.append(code)\n task_rate = round(((df_w_p_macd.index(code) + 1) / len(df_w_p_macd)) * 100, 2)\n print('\\r run_all_kdj %s task: ' % ktype + str(task_rate) + '%', end='', flush=True)\n d = pd.DataFrame(data=df_kdj, columns=['code'], dtype=None, copy=False)\n d.to_csv(\n self.data_dir + '/res/' + '%s_run_all_%s.csv' % (time.strftime('%Y_%m_%d', time.localtime(time.time())), ktype),\n index=None)\n print(len(df_kdj) )\n print(df_kdj)\n return \"[%s]run_all:%s\\n\" % (ktype, df_kdj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether the element's key attribute is postcode-like.
def is_postal_code(elem):
    return 'post' in elem.attrib['k']
[ "def postcode(self):\n return self._postcode", "def is_valid_postcode(postcode):\n if len(postcode) != 6 or postcode[:2] != \"72\":\n return False\n return postcode.isdigit()", "def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response", "def detect_postcode_type(postcode):\n postcode_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}[\\s]*[0-9][A-Z]{2}$'\n district_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}$'\n area_pattern = r'^[A-Z]{1,2}$'\n\n postcode = clean_postcode(postcode)\n\n # Convert x to a pandas series\n postcode = pd.Series(np.atleast_1d(postcode))\n\n postcode_type = np.where(\n postcode.str.match(postcode_pattern), 'postcode',\n np.where(\n postcode.str.match(district_pattern), 'district',\n np.where(\n postcode.str.match(area_pattern), 'area', 'none'\n )\n )\n )\n\n return postcode_type", "def _getPostalCode(row):\n patt = r\"\\d{6}\"\n for value in row:\n mo = re.search(patt, str(value))\n if mo:\n # Instead of returning value here, I did this monster\n # because Outlet Yishun 747's postalcode had \\xa0 in it.\n # It's a unicode thing, note to self for future learning.\n return str(f\"Singapore {mo.group()}\")", "def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None", "def verify_postcode_api(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n request_path = requests.get(self.path + self.postcodes, verify=False)\n response_code = str(request_path)\n\n if response_code == '<Response [200]>':\n verification_status = 'Verified'\n elif response_code == '<Response [404]>':\n verification_status = 'Invalid Postcode'\n elif response_code == '<Response [400]':\n verification_status = 'No Postcode Submitted'\n elif response_code == '<Response [500]':\n verification_status = 'Server error'\n else:\n verification_status = 'Invalid Postcode'\n return verification_status", "def clean_postcode(postcode):\n\n # delete -XXXX after the five digit postcode\n if \"-\" in postcode:\n return postcode.split(\"-\")[0]\n\n # delete MA in the postcodes\n elif \"MA\" in postcode:\n new_postcode = postcode.replace(\"MA \", \"\")\n if len(new_postcode) == 5:\n return new_postcode\n else:\n return \"00000\"\n\n # return \"00000\" for postcodes that are less than 5 digits\n elif len(postcode) < 5:\n return \"00000\"\n\n # return \"00000\" for postcodes that are outside the area\n elif postcode == \"01125\" or postcode == \"20052\" or postcode == \"01238\" or postcode == \"01240\" or postcode == \"01250\":\n return \"00000\"\n else:\n return postcode", "def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = 
self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)", "def test_search_zip_post_code(self):\n pass", "def update_postcode(postcode):\n \n if CA_INCLUDED.search(postcode):\n modified_postcode = CA_INCLUDED.sub(r'', postcode)\n \n elif postcode == 'CUPERTINO':\n modified_postcode = '95014'\n \n elif postcode == '95914':\n modified_postcode = '95014'\n \n elif postcode == '95014-2143;95014-2144':\n modified_postcode = '95014'\n \n elif postcode == u'94087\\u200e':\n modified_postcode = '94087'\n \n else:\n modified_postcode = five_digit_postcode(postcode)\n \n return modified_postcode", "def fix_postcode(raw_postcode):\n if not is_valid_postcode(raw_postcode):\n return None\n else:\n return raw_postcode", "def get_info_on_postalcode(_, postalcode):\n fourpp = int(postalcode[0:4])\n chars = postalcode[4:6]\n streets = get_streets(fourpp, chars)\n if streets:\n street = streets[0]\n town = street.postcode.city.get_official_name()\n address = street.street\n data = {'found': True, 'address': address, 'town': town}\n else:\n data = {'found': False}\n j = json.dumps(data)\n return HttpResponse(j, content_type='application/json')", "def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p", "def out_of_range(postcode):\n\n valid_zipcode = ['940', '945', '950', '951']\n if first_3_digit.search(postcode).group() not in valid_zipcode:\n return True\n else:\n return False", "def update_postcode(postcode_value):\r\n postcode_mapping = {'78621' : '78681', '787664' : '78664', '78728-1275' : '78728'}\r\n\r\n if postcode_value not in postcode_mapping.keys():\r\n return postcode_value\r\n else:\r\n print(postcode_value, \"cleaned to -->\", postcode_mapping[postcode_value])\r\n return(postcode_mapping[postcode_value])", "def is_valid(self, post_code):\n\n for REGEX in self.REGEXs:\n if re.match(REGEX, post_code):\n return True\n\n return False", "def us_ppop(ppop):\n # return false if it's null or not 7 digits long\n if not ppop or len(ppop) != 7:\n return False\n\n ppop = ppop.upper()\n if ppop[:2] in g_state_by_code or ppop[:2] in g_state_code_by_fips:\n return True\n\n return False", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A dictionary named switcher stores all the switch-like cases. When you pass an argument to the switch_demo function, it is looked up against the switcher dictionary mapping. If a match is found, the associated value is returned; otherwise a default string ('Invalid month') is returned. The default string helps implement the 'default case' of a switch statement.
def switch_demo(argument):
    switcher = {
        1: "January",
        2: "February",
        3: "March",
        4: "April",
        5: "May",
        6: "June",
        7: "July",
        8: "August",
        9: "September",
        10: "October",
        11: "November",
        12: "December"
    }
    return switcher.get(argument, "Invalid month")
[ "def explaining_switch_with_default_dict(character2):\n character1 = defaultdict(lambda :'some other character',character)\n return character2+\":\"+character1[character2]", "def dia_semana_switch(num):\n switcher = {\n 1: \"Lunes\",\n 2: \"Martes\",\n 3: \"Miercoles\",\n 4: \"Jueves\",\n 5: \"Viernes\",\n 6: \"Sabado\",\n 7: \"Domingo\",\n }\n return switcher.get(num, \"Error, el numero tiene que estar entre 1 y 7.\")", "def test_switch(self):\n self.assertEqual(utils.switch('foo',\n foo=3,\n bar=6), 3)\n self.assertEqual(utils.switch('blonk',\n foo=3,\n bar=6,\n default=7), 7)\n self.assertEqual(utils.switch('foo', resolve_functions=True,\n foo=lambda: 7,\n bar=lambda: 11), 7)", "def test_switch_with_default(self):\n qubit = Qubit()\n creg = ClassicalRegister(2, \"c\")\n case1 = QuantumCircuit([qubit], creg)\n case1.x(0)\n case2 = QuantumCircuit([qubit], creg)\n case2.y(0)\n case3 = QuantumCircuit([qubit], creg)\n case3.z(0)\n\n circuit = QuantumCircuit([qubit], creg)\n circuit.switch(creg, [(0, case1), (1, case2), (CASE_DEFAULT, case3)], [0], circuit.clbits)\n\n test = dumps(circuit, experimental=ExperimentalFeatures.SWITCH_CASE_V1)\n expected = \"\"\"\\\nOPENQASM 3;\ninclude \"stdgates.inc\";\nbit[2] c;\nint switch_dummy;\nqubit _qubit0;\nswitch_dummy = c;\nswitch (switch_dummy) {\n case 0: {\n x _qubit0;\n }\n break;\n case 1: {\n y _qubit0;\n }\n break;\n default: {\n z _qubit0;\n }\n break;\n}\n\"\"\"\n self.assertEqual(test, expected)", "def choose_month():\r\n\r\n # get user input for month (all, january, february, ... , june)\r\n\r\n while True:\r\n month=input(\"\\nWould you like to analyze a specific month or should all months be displayed?\\nPlease make your choice:\\n\\nJanuary (1), \\nFebruary (2),\\nMarch (3), \\nApril (4),\\nMay (5),\\nJune (6),\\nall months (all)?\\n\\n\").lower()\r\n if month=='january' or month=='1':\r\n print (\"\\nYour choice: January\\n\")\r\n return 'january'\r\n elif month=='february' or month=='2':\r\n print (\"\\nYour choice: February\\n\")\r\n return 'february'\r\n elif month=='march' or month=='3':\r\n print (\"\\nYour choice: March\\n\")\r\n return 'march'\r\n elif month=='april' or month=='4':\r\n print (\"\\nYour choice: April\\n\")\r\n return 'april'\r\n elif month=='may' or month=='5':\r\n print (\"\\nYour choice: May\\n\")\r\n return 'may'\r\n elif month=='june' or month=='6':\r\n print (\"\\nYour choice: June\\n\")\r\n return 'june'\r\n if month not in ('1', '2', '3', '4', '5', '6', 'january', 'february', 'march', 'april', 'may', 'june', 'all'):\r\n print(\"\\nInvalid input, please try again :)\")\r\n continue\r\n else:\r\n break\r\n\r\n return month", "def get_month():\n while True:\n mnth_str = \"\\nMonth? 
\"\n for i, v in enumerate(MONTHS_DATA):\n mnth_str += \"{}.{} \".format(i, v.title())\n mnth_str += \"\\n\"\n\n month = input(mnth_str).lower()\n\n if month in (\"1\", \"jan\"):\n month = \"january\"\n elif month in (\"2\", \"feb\"):\n month = \"february\"\n elif month in (\"3\", \"mar\"):\n month = \"march\"\n elif month in (\"4\", \"apr\"):\n month = \"april\"\n elif month in (\"5\", \"may\"):\n month = \"may\"\n elif month in (\"6\", \"jun\"):\n month = \"june\"\n elif month in (\"0\", \"all\"):\n month = \"all\"\n break\n\n if month in MONTHS_DATA:\n break\n\n print(\"\\nIncorrect MONTH option specified...Please retry.]\\n\")\n\n return month", "def __checkSwitch ( self, letter, value ):\n\n #-- 1 --\n # [ if letter is a key in self.switchMap -> I\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n if not self.switchMap.has_key ( letter ):\n usage ( self.switchSpecs, self.posSpecs,\n \"No such switch: -%s\" % letter )\n\n #-- 2 --\n if len(value) == 0:\n self.switchMap[letter] = 1\n else:\n self.switchMap[letter] = value", "def test_formatter_default(self, capsys):\n f = Formatter(format_='default', output=sys.stdout)\n f.printer(RESULTS)\n out, _ = capsys.readouterr()\n assert \"'Case': '19-0400694'\" in out", "def test_011_month_en():\n tr = i18n.I18N('en')\n assert tr._monthName(\"JAN\") == \"Jan\"\n assert tr._monthName(\"jan\") == \"Jan\"\n assert tr._monthName(\"Jan\") == \"Jan\"\n assert tr._monthName(\"DEC\") == \"Dec\"\n assert tr._monthName(\"dec\") == \"Dec\"", "def test_switch_number_string(self):\n switch_string = '6331101999990016'\n self.assertTrue(formatter.is_switch(switch_string))", "def day_name(num):\n if num == 0:\n return \"Sunday\"\n if num == 1:\n return \"Monday\"\n if num == 2:\n return \"Tuesday\"\n if num == 3:\n return \"Wednesday\"\n if num == 4:\n return \"Thursday\"\n if num == 5:\n return \"Friday\"\n if num == 6:\n return \"Saturday\"\n else:\n return None", "def get_month(filters):\n\n if filters == 'None' or filters == 'Day':\n return 'All'\n\n while True:\n month = input('\\nChoose the month by which you want to filter the data:\\n1) January' +\n '\\n2) February\\n3) March\\n4) April\\n5) May\\n6) June\\nPlease input numbers only(1-6):\\n')\n month = month.title()\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n\n # Attempting to decode mnemonic input else prompt for input again\n if month not in months:\n if month == '1' or month == 'Jan':\n month = 'January'\n elif month == '2' or month == 'Feb':\n month = 'February'\n elif month == '3' or month == 'Mar':\n month = 'March'\n elif month == '4' or month == 'Apr':\n month = 'April'\n elif month == '5' or month == 'Ma':\n month = 'May'\n elif month == '6' or month == 'Jun':\n month = 'June'\n else:\n print('\\n******************INVALID INPUT*******************')\n print('Please select any option from (1-7):')\n continue\n break\n return month", "def test_011_month_ru():\n tr = i18n.I18N('ru')\n assert tr._monthName(\"JAN\") == \"Янв\"\n assert tr._monthName(\"jan\") == \"Янв\"\n assert tr._monthName(\"Jan\") == \"Янв\"\n assert tr._monthName(\"DEC\") == \"Дек\"\n assert tr._monthName(\"dec\") == \"Дек\"", "def month_dict(arg=None):\n mdict = {}\n dt = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n for month in range(1, 13):\n dt[1] = month\n mname = time.strftime(\"%b\", dt)\n mdict[mname] = month\n return mdict", "def default(arg):\n return arg + ' [Default: %default]'", "def _get_month_by_name(month):\n month = str(month).lower().replace('.', '')\n if month 
in ('jan', 'january'):\n return 0\n elif month in ('feb', 'february'):\n return 1\n elif month in ('mar', 'march'):\n return 2\n elif month in ('apr', 'april'):\n return 3\n elif month in ('may',):\n return 4\n elif month in ('jun', 'june'):\n return 5\n elif month in ('jul', 'july'):\n return 6\n elif month in ('aug', 'august'):\n return 7\n elif month in ('sep', 'september'):\n return 8\n elif month in ('oct', 'october'):\n return 9\n elif month in ('nov', 'november'):\n return 10\n elif month in ('dec', 'december'):\n return 11\n raise ValueError('Month :%s now known!' % month)", "def print_msg_switch(data):\n\n print(format_message(data, 10, \"Switch\"))", "def load_case_default(elem, symast, symimp):\n return ast.make_case_br(None, load_sub(elem.find(\"./Choice/Then/*\"),\n symast, symimp))", "def _build_labels_case_statement(self, rate_dict, label_type, default_rate=None):\n case_dict = {}\n for tag_key, tag_value_rates in rate_dict.items():\n statement_list = [\"CASE\"]\n for tag_value in tag_value_rates:\n statement_list.append(\n f\"\"\"\n WHEN {label_type}->>'{tag_key}'='{tag_value}'\n THEN '{{\"{tag_key}\": \"{tag_value}\"}}'\n \"\"\"\n )\n if default_rate:\n statement_list.append(\n f\"\"\"\n ELSE '{{\"{tag_key}\": \"' || cast({label_type}->>'{tag_key}' as text) || '\"}}'\n \"\"\"\n )\n statement_list.append(f\"END as {label_type}\")\n case_dict[tag_key] = \"\\n\".join(statement_list)\n return case_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove journal entry at position `pos`.
def remove_entry(self, pos: int) -> None: del self.entries[pos]
[ "def _remove(self, pos):\n node = self._validate(pos)\n\n if self.num_children(pos) > 1:\n raise ValueError('pos have more than one child:', pos._node._element, list(pos._node._children.values()))\n\n # node is a leaf node\n if len(node._children) == 0:\n if not node is self._root:\n del node._parent._children[node] # delete node from children dict.\n node._parent = node # deprecate node.\n self._size -= 1\n return node._element\n\n # node is an internal node with one child.\n else:\n child = list(node._children.values())[0]\n # node is root node (i.e. does not have parent.)\n if node is self._root:\n self._root = child\n # node has parent node\n else:\n parent = node._parent\n parent._children[child] = child\n del parent._children[node] # delete node from children dict.\n print(\"parent node: \", parent)\n print('parent node children: ', parent._children)\n\n self._size -= 1\n node._parent = node # deprecate node.\n return node._element", "def eliminar(self, pos):\n\n if pos > self._tamanio - 1 or pos < 0:\n raise IndexError(\"Posición inválida\")\n\n actual = self._inicio\n count = 0\n while count != pos + 1:\n siguiente = actual.getSiguiente()\n if pos == 0 and siguiente is None:\n self._inicio = None\n elif pos == 0 and siguiente is not None:\n self._inicio = siguiente\n elif count == pos - 1:\n actual.setSiguiente(siguiente.getSiguiente())\n\n actual = siguiente\n count += 1\n\n self._tamanio -= 1", "def delete_row(self, pos):\n del self._grid[pos]", "def pyreadline_remove_history_item(pos: int) -> None:\n # Save of the current location of the history cursor\n saved_cursor = readline.rl.mode._history.history_cursor\n\n # Delete the history item\n del readline.rl.mode._history.history[pos]\n\n # Update the cursor if needed\n if saved_cursor > pos:\n readline.rl.mode._history.history_cursor -= 1", "def delete(self,pos):\n pos.next = pos.next.next", "def delete_data(self, *pos):\n r, c = pos\n self._grid[r][c] = None", "def _del(pos, l):\n if pos == 0:\n return l[1:]\n elif pos == len(l) - 1:\n return l[:pos]\n else:\n return l[0:pos] + l[pos+1:]", "def delete(self,pos):\n temp=self.head\n for i in range(self.size):\n while(i==pos and temp.nxt!=None):\n a=temp.nxt\n temp.nxt=a.nxt\n self.size-=1\n if(temp.nxt=None):\n break\n temp=temp.nxt\n\n pass", "def remove_from_level(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n level = level[:index] + AIR + level[1 + index:]\n return level", "def deleteLL(self,pos):\n pos.next = pos.next.next", "def delete(self, position) -> str:\n self.__clock += 1\n old_char = self.__doc[position + 1]\n self.__doc.remove(old_char)\n return self.__export(\"d\", old_char)", "def remove_entry(self, entry: data.Entry) -> None:\n del self.entries[entry]", "def delete_link(self, pos):\n # DEBUG\n #debug(\"delete_link\", (\"cond\", pos in self._my_links.viewkeys()))\n if pos in self._my_links.viewkeys():\n temp = self._my_links.pop(pos)\n del temp\n if pos in self._links.viewkeys():\n temp = self._links.pop(pos)\n del temp", "def delete(self, del_pos=None):\n if del_pos is None:\n del_pos = self.__length\n if self.__list is None:\n print \"Nothing to remove.\"\n else:\n if del_pos == 0:\n self.__list = self.__list.get_next()\n else:\n prior = self.__list\n current = self.__list.get_next()\n current_pos = 1\n while current.get_next() is not None and current_pos < del_pos:\n prior = current\n current = current.get_next()\n current_pos += 1\n prior.set_next(current.get_next())\n self.__length -= 1", "def 
delete_node_at_pos(self, pos):\n if self.head:\n cur_node = self.head\n if pos == 0:\n self.head = cur_node.next\n cur_node = None\n return \n\n prev = None\n count = 0 \n while cur_node and count != pos:\n prev = cur_node\n cur_node = cur_node.next\n count += 1\n\n if cur_node is None:\n return \n\n prev.next = cur_node.next\n cur_node = None", "def delete(self, node):\n\n # logger_cagada.debug(\"norrando nodo %s\" % (type(node)))\n entry = self.entry_finder.pop(node)\n # logger_cagada.debug(\"la entry q c borra %s\" % entry)\n entry[-1] = self.REMOVED\n # logger_cagada.debug(\"el heap es %s\" % self.heap)\n return entry[0]", "def remove_entry(self, entry):\n self.libraries['surface'].entries.pop(f'{entry.label}')", "def delete_column(self, pos):\n for i in range(len(self._grid)):\n del self._grid[i][pos]", "def deleteEntry(entry_id):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save journal entries into a file.
def save(journal: Journal, file: Path) -> None: with open(file, "w") as output: output.writelines(f"{entry}\n" for entry in journal.get_entries())
[ "def save(name, journal_data):\n filename = get_full_pathname(name)\n print(\"Saving to: {}\".format(filename))\n file_out = open(filename, 'w')\n\n for entry in journal_data:\n file_out.write(entry + '\\n')\n\n file_out.close()", "def save_exit(name, data):\n jrn_path = build_path(name)\n print(f'... saving new journal entries to {jrn_path} ...')\n with open(jrn_path, 'w') as file:\n for line in data:\n file.write(line + '\\n')\n print('... save complete ...')", "def exportJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n fileName = self.journals[x]['name']\n fileName += \".txt\"\n options = QtGui.QFileDialog.DontResolveSymlinks | QtGui.QFileDialog.ShowDirsOnly\n directory = QtGui.QFileDialog.getExistingDirectory(None, \"Select directory to save file\", os.getenv('HOME'), options)\n if directory:\n fileName = directory + \"/\" + fileName\n print (\"Exporting: to \" + fileName)\n data = self.journals[x]['journal']\n f = open(fileName, 'w')\n f.write(data)\n f.close()\n self.log += \"Journal \" + fileName + \" exported\"\n QtGui.QMessageBox.information(None, \"Journal Export\", str(fileName)+\" exported\\n\")", "def write(self, filename):\n with open(filename, 'w') as f:\n for entry in self.entrys:\n f.write(self._entry_to_string(entry) + '\\n')\n logging.info('Wrote {0} entrys to file {1}'.format(\n len(self.entrys), filename))", "def __save_log_to_file(log_content):\n with open(LOG_PATH, mode='a', encoding='utf8') as file:\n file.write(log_content + '\\n')", "def write_entries(self, entries):\n for entry in entries:\n self.write(entry)", "def write_all(self):\n for f in self.folders:\n self.write(f)\n for n in self.journalentries:\n self.write(n)", "def write_notes(notes, filename):\n _filepath = os.path.abspath(filename)\n _dirname = os.path.dirname(_filepath)\n if not os.path.exists(_dirname):\n os.makedirs(_dirname)\n if os.path.exists(_filepath):\n os.remove(_filepath)\n\n with open(_filepath, 'wb') as handle:\n pickle.dump(notes, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def write_log_to_file(self):\n if self.__log:\n f = open(\"log.txt\", \"a+\")\n for message in self.__log:\n f.write(message + \"\\n\")\n f.close()", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def write_to_file(self, content):\n try:\n with open(self.full_path_to_file, \"wb\") as fp:\n fp.write(content)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. 
Permission denied for this directory\"\n )\n sys.exit()\n self.logger.info(\"News has been successfully converted\")", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def save_note(self, filename:str):\n self.path = os.path.join(notes_dir, filename)\n fp = open(self.path, 'w+')\n fp.write('\\n'.join([self.title, self.body_text]))\n fp.close()", "def write_to_file(self, papers, filename):\n\t\tpass", "def write_to_file(self):\n self._file_writer.write(self._reconstructed_sentences)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def saveLog(self):\n name = \"Log_\"+time.asctime().replace(' ', '_')+\".txt\"\n f = open(name, \"w\")\n text = self.plainText_Log.toPlainText()\n f.write(text)\n f.close()\n self.statusbar.showMessage(\"Saved log\")\n msg = \"Saved Log to File : {}\".format(name)\n self.addLogEntry(msg)", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load journal entries from a file.
def load(journal: Journal, file: Path) -> None:
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def open(self, filename=None):\n filename = filename or self.config['journal']\n\n if self.config['encrypt']:\n with open(filename, \"rb\") as f:\n journal_encrypted = f.read()\n\n def validate_password(password):\n self.make_key(password)\n return self._decrypt(journal_encrypted)\n\n # Soft-deprecated:\n journal = None\n if 'password' in self.config:\n journal = validate_password(self.config['password'])\n if journal is None:\n journal = util.get_password(keychain=self.name, validator=validate_password)\n else:\n with codecs.open(filename, \"r\", \"utf-8\") as f:\n journal = f.read()\n self.entries = self._parse(journal)\n self.sort()", "def load_entities (self, path=PATH_JOURNALS):\n with codecs.open(path, \"r\", encoding=\"utf8\") as f:\n journals = json.load(f)\n\n # find the next ID to use\n ids = sorted([int(j[\"id\"].replace(\"journal-\", \"\")) for j in journals])\n self.next_id = ids[-1]\n\n # scan for duplicates\n self.known = {}\n\n for journal in journals:\n if \"issn\" in journal:\n for issn in journal[\"issn\"]:\n self.seen_issn.add(issn)\n\n for title in journal[\"titles\"]:\n title_key = title.strip().lower()\n\n if title_key in self.known:\n print(\"DUPLICATE JOURNAL: {}\".format(title))\n else:\n self.known[title_key] = journal", "def get_journal_entries_text():\n # TODO: Update this section to open and scan text of all files in directory into objects array\n journal_entry_file_names = get_journal_entry_file_names()\n\n entries = []\n for entry in journal_entry_file_names:\n #get/set entry day\n entry_day_extension = entry.split(' ')\n entry_day = entry_day_extension[1].split('.')\n \n full_file_path = journal_directory_path + \"\\\\\" + entry\n my_file = open(full_file_path, \"r\")\n \n if my_file.mode == 'r':\n #get/set entry text\n file_lines = my_file.readlines()\n entry_text = ' '.join(file_lines)\n #get/set entry date\n dirty_entry_date = entry_text.splitlines()[1]\n entry_date = dirty_entry_date.strip()\n #get/set entry sentiment\n sentiment_analysis = SentimentAnalysis.get_sentiment_analysis(entry_text)\n #create/append entry\n je = Entry(entry_day[0], entry_date, entry_text,sentiment_analysis)\n entries.append(je)\n my_file.close()\n return entries", "def _parse(self, journal_txt):\n\n # Entries start with a line that looks like 'date title' - let's figure out how\n # long the date will be by constructing one\n date_length = len(datetime.today().strftime(self.config['timeformat']))\n\n # Initialise our current entry\n entries = []\n current_entry = None\n\n for line in journal_txt.splitlines():\n line = line.rstrip()\n try:\n # try to parse line as date => new entry begins\n new_date = datetime.strptime(line[:date_length], self.config['timeformat'])\n\n # parsing successful => save old entry and create new one\n if new_date and current_entry:\n entries.append(current_entry)\n\n 
if line.endswith(\"*\"):\n starred = True\n line = line[:-1]\n else:\n starred = False\n\n current_entry = Entry.Entry(self, date=new_date, title=line[date_length+1:], starred=starred)\n except ValueError:\n # Happens when we can't parse the start of the line as an date.\n # In this case, just append line to our body.\n if current_entry:\n current_entry.body += line + \"\\n\"\n\n # Append last entry\n if current_entry:\n entries.append(current_entry)\n for entry in entries:\n entry.parse_tags()\n return entries", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def save(journal: Journal, file: Path) -> None:\n with open(file, \"w\") as output:\n output.writelines(f\"{entry}\\n\" for entry in journal.get_entries())", "def read_journal(journal_path):\n detected = {}\n with open(journal_path, 'rb') as journal:\n journal_name = op.basename(journal_path)\n for line in journal:\n decoded_line = line.decode(\"latin1\", \"ignore\")\n for key_phrase in key_phrases:\n if key_phrase in decoded_line:\n print(colorful.bold_red(f\"-!!_found:{key_phrases[key_phrase]}_!!\"))\n print(\" journal path: {}\".format(journal_path))\n # print(decoded_line)\n detected[key_phrases[key_phrase]] = journal_name + decoded_line\n return detected\n if not detected:\n detected[\"nothing detected in\"] = journal_name\n return detected", "def load_entities (self, path=PATH_AUTHORS):\n self.known.read_authors(path)", "def load(self, filename):\r\n print(\"Loading dictionary. Please wait ...\")\r\n infile = None \r\n if (self.__conn is not None):\r\n try:\r\n infile = open(filename, encoding=\"utf8\")\r\n for line in infile:\r\n word = line.replace(\"\\n\",\"\").upper()\r\n if (\"'\" not in word):\r\n self.__cursor = self.__conn.cursor()\r\n self.__cursor.execute(\"SELECT word FROM en_dict \" \r\n \"WHERE word = '%s';\" % word)\r\n if (self.__cursor.fetchone() is None):\r\n self.__conn.execute(\"INSERT INTO en_dict VALUES (?,?);\",\r\n self.get_pattern(word))\r\n self.__conn.commit()\r\n self.__conn.execute(\"VACUUM;\")\r\n\r\n except (IOError, OSError, sqlite3.Error) as err:\r\n print(err)\r\n finally:\r\n if infile is not None:\r\n infile.close()\r\n\r\n self.__recordcount()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def LoadFromFile(self, filename):\n f = open(filename, 'r')\n contents = f.read()\n self.LoadFromData(contents)\n f.close()", "def do_import(self, file):\n UglyBibtex(file).do_import()", "def load_posts():\n \n with open(FILE_NAME, 'r') as f:\n return pickle.load(f)", "def from_dictionary_file(self, path):\n with open(path, 'r') as f:\n for word in f.readlines():\n self.insert(word[:-1])", "def load_biblio(self, file_name, preload_ids=False, chunksize=1000):\n\n logger.info( \"Loading biblio data from [{}], with chunk size {}. Preload IDs? 
{}\".format(file_name, chunksize, preload_ids) )\n\n input_file = codecs.open(file_name, 'r', 'utf-8')\n biblio = json.load(input_file)\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n if (\"cx_oracle\" in str(self.db.dialect)):\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (:1, :2, :3)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (:1, :2, :3)')\n else:\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (%s, %s, %s)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (%s, %s, %s)')\n\n\n ########################################################################\n # STEP 1: If overwriting, find extant docs and pre-populate doc ID map #\n ########################################################################\n\n extant_docs = set()\n\n if self.overwrite or preload_ids:\n\n for chunk in chunks(biblio, chunksize):\n\n # Loop over all biblio entries in this chunk\n doc_nums = set()\n for bib in chunk[1]:\n\n input_pubnum = self._extract_pubnumber(bib)\n\n # Early return: don't bother querying if we already have an ID\n if input_pubnum in self.doc_id_map:\n extant_docs.add( input_pubnum ) \n continue\n\n doc_nums.add(input_pubnum)\n\n if len(doc_nums) == 0:\n continue\n\n self._fill_doc_id_map(doc_nums, sql_alc_conn, extant_docs)\n\n logger.info( \"Discovered {} existing IDs for {} input documents\".format( len(extant_docs),len(biblio)) )\n\n\n ########################################################\n # STEP 2: Main biblio record processing loop (chunked) #\n ########################################################\n\n for chunk in chunks(biblio, chunksize):\n\n logger.debug( \"Processing {} biblio records, up to index {}\".format(len(chunk[1]), chunk[0]) )\n\n new_doc_mappings = dict() # Collection IDs for totally new document \n overwrite_docs = [] # Document records for overwriting\n duplicate_docs = set() # Set of duplicates to read IDs for\n known_count = 0 # Count of known documents\n\n new_titles = []\n new_classes = [] \n\n doc_insert_time = 0\n\n\n transaction = sql_alc_conn.begin()\n\n for bib in chunk[1]:\n\n ########################################\n # STEP 2.1 Extract core biblio records #\n ########################################\n\n family_id, pubdate, pubnumber, assign_applic = self._extract_core_biblio(bib)\n\n life_sci_relevant = self._extract_life_sci_relevance(bib)\n\n\n ####################################################\n # Step 2.2 Overwrite or Insert the document record #\n ####################################################\n\n if pubnumber in extant_docs:\n\n known_count += 1\n\n if self.overwrite:\n # Create an overwrite record\n doc_id = self.doc_id_map[pubnumber] \n overwrite_docs.append({\n 'extant_id' : doc_id,\n 'new_published' : pubdate,\n 'new_family_id' : family_id,\n 'new_life_sci_relevant' : life_sci_relevant,\n 'new_assign_applic' : assign_applic })\n else:\n # The document is known, and we're not overwriting: skip\n continue\n\n else:\n \n # Create a new record for the document\n record = {\n 'scpn' : pubnumber,\n 'published' : pubdate,\n 'family_id' : family_id,\n 'assign_applic' : assign_applic,\n 'life_sci_relevant' : int(life_sci_relevant) }\n \n try:\n\n start = time.time()\n result = sql_alc_conn.execute( self.docs.insert(), record )\n end = time.time()\n\n 
doc_insert_time += (end-start)\n\n except Exception, exc:\n\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n elif self.allow_document_dups:\n\n # It's an integrity error, and duplicates are allowed.\n known_count += 1\n duplicate_docs.add(pubnumber)\n\n # Reset transaction\n transaction.commit()\n transaction = sql_alc_conn.begin()\n continue \n\n else:\n\n raise RuntimeError(\n \"An Integrity error was detected when inserting document {}. This \"\\\n \"indicates insertion of an existing document, but duplicates have been disallowed\".format(pubnumber))\n\n\n doc_id = result.inserted_primary_key[0] # Single PK\n new_doc_mappings[pubnumber] = doc_id\n\n self._extract_detailed_biblio(bib, doc_id, new_classes, new_titles, pubnumber)\n\n # Commit the new document records, then update the in-memory mapping with the new IDs\n transaction.commit()\n self.doc_id_map.update(new_doc_mappings)\n\n logger.info(\"Processed {} document records: {} new, {} duplicates. DB insertion time = {:.3f}\".format( len(chunk[1]), len(new_doc_mappings), known_count, doc_insert_time))\n\n\n ########################################################\n # STEP 2.2: Deal with document overwrites / duplicates #\n ########################################################\n\n if len(overwrite_docs) > 0:\n\n transaction = sql_alc_conn.begin()\n\n # Update the master record for the document that's being overwritten\n stmt = self.docs.update().\\\n where(self.docs.c.id == bindparam('extant_id')).\\\n values(published=bindparam('new_published'), \n family_id=bindparam('new_family_id'), \n life_sci_relevant=bindparam('new_life_sci_relevant'),\n assign_applic=bindparam('new_assign_applic'))\n\n sql_alc_conn.execute(stmt, overwrite_docs)\n\n # Clean out ALL other references to the document, for re-insertion\n delete_ids = [record['extant_id'] for record in overwrite_docs]\n\n stmt = self.titles.delete().where( self.titles.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.classes.delete().where( self.classes.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.chem_mapping.delete().where( self.chem_mapping.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n transaction.commit()\n\n logger.info(\"Overwrote {} duplicate documents (master doc record updated, all other references deleted)\".format(len(overwrite_docs)))\n\n if len(duplicate_docs) > 0:\n self._fill_doc_id_map(duplicate_docs, sql_alc_conn)\n\n logger.info(\"Read {} IDs for duplicate documents\".format(len(duplicate_docs)))\n\n ########################################################\n # STEP 2.3: Bulk insertion of titles / classifications #\n ########################################################\n\n\n # Bulk insert titles and classification\n if self.load_titles:\n title_ins.execute(new_titles)\n logger.debug(\"Insertion of {} titles completed\".format(len(new_titles)) )\n\n if self.load_classifications:\n classes_ins.execute(new_classes)\n logger.debug(\"Insertion of {} classification completed\".format(len(new_classes)) )\n\n # END of main biblio processing loop\n\n # Clean up resources\n title_ins.close()\n classes_ins.close()\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Biblio import completed\" )", "def load(logFile):\n pass #TODO" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load journal entries from a URI.
def load_from_web(journal: Journal, uri: str) -> None:
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load(journal: Journal, file: Path) -> None:", "def load_entities (self, path=PATH_JOURNALS):\n with codecs.open(path, \"r\", encoding=\"utf8\") as f:\n journals = json.load(f)\n\n # find the next ID to use\n ids = sorted([int(j[\"id\"].replace(\"journal-\", \"\")) for j in journals])\n self.next_id = ids[-1]\n\n # scan for duplicates\n self.known = {}\n\n for journal in journals:\n if \"issn\" in journal:\n for issn in journal[\"issn\"]:\n self.seen_issn.add(issn)\n\n for title in journal[\"titles\"]:\n title_key = title.strip().lower()\n\n if title_key in self.known:\n print(\"DUPLICATE JOURNAL: {}\".format(title))\n else:\n self.known[title_key] = journal", "def loadStoreEntry(entry, entryXmlPath): #$NON-NLS-1$\r\n try:\r\n dom = ZDom()\r\n dom.load(entryXmlPath)\r\n return ZResourceStoreEntryDeserializer().deserialize(entry, dom)\r\n except Exception, e:\r\n raise e", "def fetch_journal(journal_id):\n return fetch_data(\"/journals/%s\" % (journal_id))", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def loadFromStream(self, stream, uri=None):\n self.loadFromDom(parseStream(stream))", "def importfeed(url, username):\r\n\r\n user = User.query.filter_by(username=username).first()\r\n if not user:\r\n print \"User %s does not exist\" % username\r\n sys.exit(1)\r\n d = feedparser.parse(url)\r\n for entry in d['entries']:\r\n post = Post(author=user,\r\n title=entry.title[:200],\r\n link=entry.link)\r\n\r\n db.session.add(post)\r\n db.session.commit()", "def load_entities (self, path=PATH_AUTHORS):\n self.known.read_authors(path)", "def _parse(self, journal_txt):\n\n # Entries start with a line that looks like 'date title' - let's figure out how\n # long the date will be by constructing one\n date_length = len(datetime.today().strftime(self.config['timeformat']))\n\n # Initialise our current entry\n entries = []\n current_entry = None\n\n for line in journal_txt.splitlines():\n line = line.rstrip()\n try:\n # try to parse line as date => new entry begins\n new_date = datetime.strptime(line[:date_length], self.config['timeformat'])\n\n # parsing successful => save old entry and create new one\n if new_date and current_entry:\n entries.append(current_entry)\n\n if line.endswith(\"*\"):\n starred = True\n line = line[:-1]\n else:\n starred = False\n\n current_entry = Entry.Entry(self, date=new_date, title=line[date_length+1:], starred=starred)\n except ValueError:\n # Happens when we can't parse the start of the line as an date.\n # In this case, just append line to our body.\n if current_entry:\n current_entry.body += line + \"\\n\"\n\n # Append last entry\n if current_entry:\n entries.append(current_entry)\n for entry in entries:\n entry.parse_tags()\n return entries", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = 
ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def atom_feed(cls, uri):\n try:\n category, = cls.search([\n ('unique_name', '=', uri),\n ], limit=1)\n except ValueError:\n abort(404)\n\n feed = AtomFeed(\n \"Articles by Category %s\" % category.unique_name,\n feed_url=request.url, url=request.host_url\n )\n for article in category.published_articles:\n feed.add(**article.serialize(purpose='atom'))\n\n return feed.get_response()", "def _load(self, url):\n file_obj = cache_load(url, self.reload_cache)\n if file_obj is None:\n print(\"did not successfully load '%s'\" % url)\n return\n try:\n term = XMLReader(filename=url, ignore_errors=True).from_file(file_obj)\n term.finalize()\n except ParserException as exc:\n print(\"Failed to load %s due to parser errors\" % url)\n print(' \"%s\"' % exc)\n term = None\n self[url] = term\n return term", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def read_zotero_localhost(url, omit_indecent_citekey=False, verbose=True):\n try:\n req = requests.get(url)\n except requests.ConnectionError:\n # print(\"ConnectionError: Be sure you have Zotero Standalone running!\", file=sys.stderr)\n # exit()\n exit(\"ConnectionError: Be sure you have Zotero Standalone running!\")\n \n buf = []\n \n for line in req.content.split('\\n'):\n if line.startswith(\"@comment{\"):\n break\n buf.append(line)\n # buf = req.content.split('\\n')\n buf = [line[:-1] if line.endswith('\\r') else line for line in buf] # remove '\\r' in Windows files\n \n list_of_dicts = cut_into_list_of_dicts(buf)\n print(\"Read %d entries from Zotero '%s'\" % (len(list_of_dicts), url))\n \n if omit_indecent_citekey:\n eliminate_indecent_citekeys(list_of_dicts, verbose)\n \n parse_bib(list_of_dicts)\n \n check_duplicate_citekeys(list_of_dicts)\n \n return list_of_dicts", "def load(self, uri):\n 
self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\n\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\n\n self._latent_space = dict_options['latent_space']\n self._input_cells = dict_options['input_cells']", "def load_history_entries(self, *entries):\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n \n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Spawning the next generation of the collection by selecting n pairs of distinct forests from the previous generation and crossing them over.
def _next_generation(self, previous_generation): self._fullInput, self._fullOutput = previous_generation.get_data() self.power = self.settings.population_count for forest_iteration in range(self.power): first, second = previous_generation.selection() print 'selected for crossover ->', first.fitness, second.fitness self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))
[ "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def newGeneration(self):\n for i in range(0, len(self.population)):\n #[ind1, ind2] = self.randomSelection()\n [ind1, ind2] = self.bestSelection()\n #ind2 = self.rouletteWheel()\n #ind1 = self.rouletteWheel()\n child = self.uniformCrossover(ind1, ind2)\n #child = self.cycleCrossover(ind1, ind2)\n self.scrambleMutation(child)\n #self.reciprocalExchangeMutation(child)\n self.population[i] = child", "def next_generation(self):\n for i, p in enumerate(self.populations):\n\n # We need to try to do this self.replacements_per_generation times\n\n for t in range(p.no_tournaments):\n\n\n # Tournament selection\n [p1, p2] = p.tournament_selection()\n\n # Cross over\n offspring = p1 + p2\n\n # Mutation\n offspring.mutate()\n\n # Fitness calculation\n self.calc_on_arrays(offspring, i)\n\n # Update pool\n is_replaced = p.replace( offspring )\n\n # If replacement has taken place adjust the number of replacements\n if is_replaced:\n p.no_replacements += 1\n\n p.no_cross_overs += 1\n\n p.generations += 1\n\n self.calc_fitnesses()\n self.choose_best_chromosomes()\n self.generations += 1", "def next_generation(dna_pool, nb_children):\n offsprings = []\n\n for dna in dna_pool:\n for _ in range(nb_children):\n copy = dna.copy()\n i = random.randint(0, len(copy) -1)\n j = random.randint(0, len(copy) -1)\n\n tmp = copy[i]\n copy[i] = copy[j]\n copy[j] = tmp\n\n offsprings.append(copy)\n \n return offsprings + dna_pool", "def produce_next_generation(self):\n size = 5\n start = random.randint(0,self.gene_size-size)\n end = start+size\n length = int(len(self.parents)/2)\n for i in range(length):\n p1 = self.parents[i*2][1]\n p2 = self.parents[i*2+1][1]\n p1copy = copy.deepcopy(self.parents[i*2][1])#[start:end]\n p2copy = copy.deepcopy(self.parents[i*2+1][1])#[start:end]\n p1.genome[start:end] = p2copy.genome[start:end]\n p2.genome[start:end] = p1copy.genome[start:end]\n mut = random.sample(self.gene_pool, len(self.parents))\n\n for gene in mut:\n gene[1].mutate()\n\n # \"Creates\" new children (2 new, 2 parents die (just modifies parents in place into children))", "def _compute_next_gen(self):\n for cell in self.neighbors.keys():\n self._procreate(cell)", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many 
records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def crossbreed(population):\n tmp = copy.deepcopy(population)\n for i in range(_PAIRS_FOR_CROSSBREED):\n first = select(tmp, True)\n second = select(tmp, True)\n child = rand_crossbreed(first, second)\n population.append(child)", "def round_robin_selection(pop: List[creator.Individual], n_selected: int) -> List[creator.Individual]:\n def tourn(ind1, ind2):\n if ind1.fitness.dominates(ind2.fitness):\n return ind1\n elif ind2.fitness.dominates(ind1.fitness):\n return ind2\n\n if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:\n return ind2\n elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:\n return ind1\n\n if random.random() <= 0.5:\n return ind1\n return ind2\n\n randoms = []\n immigrants = []\n\n for ind in pop:\n if is_immigrant(ind):\n immigrants.append(ind)\n else:\n randoms.append(ind)\n\n r_size = len(randoms)\n i_size = len(immigrants)\n\n if r_size > 4 and i_size > 4:\n whole_1 = random.sample(pop, len(pop))\n whole_2 = random.sample(pop, len(pop))\n\n chosen = []\n for i in range(0, int(n_selected/2), 2):\n assert i_size == len(immigrants), \"immigrants torunament change size\"\n\n chosen.append(tourn(whole_1[i], whole_1[i + 1]))\n chosen.append(tourn(whole_2[i], whole_2[i + 1]))\n\n if i % r_size == 0 or (i+1) % r_size == 0:\n random.shuffle(randoms)\n\n if i % i_size == 0 or (i+1) % i_size == 0:\n random.shuffle(immigrants)\n\n chosen.append(tourn(randoms[i % r_size], randoms[(i+1) % r_size]))\n chosen.append(tourn(immigrants[i % i_size], immigrants[(i+1) % i_size]))\n else:\n whole_1 = random.sample(pop, len(pop))\n whole_2 = random.sample(pop, len(pop))\n\n chosen = []\n for i in range(0, n_selected, 4):\n chosen.append(tourn(whole_1[i], whole_1[i + 1]))\n chosen.append(tourn(whole_1[i + 2], whole_1[i + 3]))\n chosen.append(tourn(whole_2[i], whole_2[i + 1]))\n chosen.append(tourn(whole_2[i + 2], whole_2[i + 3]))\n\n return chosen", "def _create_num_offspring(self, number_of_offspring) -> List[Individual]:\r\n next_pop: List[Individual] = []\r\n #@TODO: comment this to new state\r\n # If the selection type is plus, then it means certain individuals survive to the next generation, so we need\r\n # to grab those first before we create new ones\r\n # if get_ga_constant('selection_type').lower() == 'plus' and len(self._next_pop) < get_ga_constant('num_parents'):\r\n if self.state == States.NEXT_GEN_COPY_PARENTS_OVER:\r\n # Select the subset of the individuals to bring to the next gen\r\n increment = 0 # How much did the offset increment by\r\n for idx in range(self._offset_into_population, len(self.population.individuals)):\r\n # for individual in self.population.individuals[self._offset_into_population: self._offset_into_population + number_of_offspring]:\r\n individual = self.population.individuals[idx]\r\n increment += 1 # For offset\r\n world = self.world\r\n wheel_radii = individual.wheel_radii\r\n wheel_densities = individual.wheel_densities\r\n #wheel_motor_speeds = individual.wheel_motor_speeds\r\n chassis_vertices = individual.chassis_vertices\r\n chassis_densities = individual.chassis_densities\r\n winning_tile = individual.winning_tile\r\n lowest_y_pos = individual.lowest_y_pos\r\n lifespan = 
individual.lifespan\r\n\r\n # If the individual is still alive, they survive\r\n if lifespan > 0:\r\n car = Car(world, \r\n wheel_radii, wheel_densities,# wheel_motor_speeds, # Wheel\r\n chassis_vertices, chassis_densities, # Chassis\r\n winning_tile, lowest_y_pos,\r\n lifespan)\r\n next_pop.append(car)\r\n # Check to see if we've added enough parents. The reason we check here is if you requet 5 parents but\r\n # 2/5 are dead, then you need to keep going until you get 3 good ones.\r\n if len(next_pop) == number_of_offspring:\r\n break\r\n else:\r\n print(\"Oh dear, you're dead\")\r\n # Increment offset for the next time\r\n self._offset_into_population += increment\r\n # If there weren't enough parents that made it to the new generation, we just accept it and move on.\r\n # Since the lifespan could have reached 0, you are not guaranteed to always have the same number of parents copied over.\r\n if self._offset_into_population >= len(self.population.individuals):\r\n self.state = States.NEXT_GEN_CREATE_OFFSPRING\r\n # Otherwise just perform crossover with the current population and produce num_of_offspring\r\n # @NOTE: The state, even if we got here through State.NEXT_GEN or State.NEXT_GEN_COPY_PARENTS_OVER is now\r\n # going to switch to State.NEXT_GEN_CREATE_OFFSPRING based off this else condition. It's not set here, but\r\n # rather at the end of new_generation\r\n else:\r\n # Keep adding children until we reach the size we need\r\n while len(next_pop) < number_of_offspring:\r\n # Tournament crossover\r\n if get_ga_constant('crossover_selection').lower() == 'tournament':\r\n p1, p2 = tournament_selection(self.population, 2, get_ga_constant('tournament_size'))\r\n # Roulette\r\n elif get_ga_constant('crossover_selection').lower() == 'roulette':\r\n p1, p2 = roulette_wheel_selection(self.population, 2)\r\n else:\r\n raise Exception('crossover_selection \"{}\" is not supported'.format(get_ga_constant('crossover_selection').lower()))\r\n\r\n # Crossover\r\n c1_chromosome, c2_chromosome = self._crossover(p1.chromosome, p2.chromosome)\r\n\r\n # Mutation\r\n self._mutation(c1_chromosome)\r\n self._mutation(c2_chromosome)\r\n\r\n # Don't let the chassis density become <=0. It is bad\r\n smart_clip(c1_chromosome)\r\n smart_clip(c2_chromosome)\r\n\r\n # Create children from the new chromosomes\r\n c1 = Car.create_car_from_chromosome(p1.world, p1.winning_tile, p1.lowest_y_pos, get_ga_constant('lifespan'), c1_chromosome)\r\n c2 = Car.create_car_from_chromosome(p2.world, p2.winning_tile, p2.lowest_y_pos, get_ga_constant('lifespan'), c2_chromosome)\r\n\r\n # Add children to the next generation\r\n next_pop.extend([c1, c2])\r\n\r\n # Return the next population that will play. 
Remember, this can be a subset of the overall population since\r\n # those parents still exist.\r\n return next_pop", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def _pop_random_n(entities: np.array, weights: np.array, count: int = 3):\n for _ in range(count):\n if not len(entities):\n return\n\n choice, entities, weights = _pop_random(entities, weights)\n yield choice", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories", "def run(self):\r\n if self.parameters_valid == False:\r\n print(\"Error: The run() method cannot be excuted with invalid parameters. \\nPlease check the parameters first.\\n\")\r\n return \r\n \r\n for g in range(self.max_generations):\r\n\r\n fitness = self.calculate_fitness()\r\n parents = self.rank_selection(fitness, self.parents_num)\r\n offspring_crossover = self.crossover(parents, offspring_size=(self.offspring_num, 5))\r\n offspring_mutation = self.mutation(offspring_crossover)\r\n offspring_re = self.rearrange(offspring_mutation, offspring_size=(self.offspring_num, 5))\r\n\r\n # Create a new generation based on the selected parents and the offspring\r\n self.population[0:parents.shape[0], :] = parents\r\n self.population[parents.shape[0]:, :] = offspring_re\r\n\r\n # All the run() methods are done.\r\n self.run_completed = True", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def cycles(n, support, randomize=False):\n support = np.array(support)\n\n def gen(p):\n g = combinations(support, n)\n if randomize:\n g = list(g)\n random.shuffle(g)\n\n for local_support in g:\n for output_p in all_permutations(local_support)(p):\n yield output_p\n\n return gen" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Executing every forest in the collection, activating their networks, and collecting data about the best fitness value along the way.
def execute(self): process_list = [] forests_queue = Queue(self.power) iterational = 0 print '| |-starting evaluation, training and validation' for one_forest in self._forests: process_list.append( Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings))) iterational += 1 for proc in process_list: proc.start() for proc in process_list: proc.join() for smth in range(forests_queue.qsize()): tmp = forests_queue.get() self._forests[tmp['place']].fitness = tmp['fitness'] fitness_summ = sum(map(lambda forest: forest.fitness, self._forests)) fss = map(lambda x: x.fitness, self._forests) print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss) self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def learn(self):\n for a in self.agents:\n a.learn()", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def run_all_classifiers(self):\n\t\tself.classifier_processes.run_all_worker_processes(self.queue)", "def run(self):\n self.test_boards = self.generate_test_boards()\n\n print('running generation', self.tag)\n\n for individual in self.individuals:\n individual.success = individual.evaluate(self.test_boards)\n print('---------')\n print('Network ID:', individual.tag)\n print('Network Score:', individual.score)\n print('Network Age:', individual.age)\n # print('weights:', individual.net._get_weights())", "def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)", "def populate_forest_func(forest, root_func, tree_funcs):\n data_arr, out_arr, start_index, end_index = root_func.args\n\n # -- SETUP BLOCK\n setup_block = root_func.append_basic_block(\"setup\")\n builder = ir.IRBuilder(setup_block)\n loop_iter = builder.alloca(INT, 1, \"loop-idx\")\n builder.store(start_index, loop_iter)\n condition_block = root_func.append_basic_block(\"loop-condition\")\n builder.branch(condition_block)\n # -- END SETUP BLOCK\n\n # -- CONDITION BLOCK\n builder = ir.IRBuilder(condition_block)\n comp = builder.icmp_signed(\"<\", builder.load(loop_iter), end_index)\n core_block = root_func.append_basic_block(\"loop-core\")\n term_block = root_func.append_basic_block(\"term\")\n builder.cbranch(comp, core_block, term_block)\n # -- END CONDITION BLOCK\n\n # -- CORE LOOP BLOCK\n builder = ir.IRBuilder(core_block)\n # build args arr, convert categoricals vars from float to int\n args = []\n loop_iter_reg = builder.load(loop_iter)\n\n n_args = ir.Constant(INT, forest.n_args)\n iter_mul_nargs = builder.mul(loop_iter_reg, n_args)\n idx = (builder.add(iter_mul_nargs, iconst(i)) for i in range(forest.n_args))\n raw_ptrs = [builder.gep(root_func.args[0], (c,)) for c in idx]\n for is_cat, ptr in zip(forest.categorical_bitmap, raw_ptrs):\n el = builder.load(ptr)\n if is_cat:\n args.append(builder.fptosi(el, INT_CAT))\n else:\n args.append(el)\n # iterate over each tree, sum up results\n res = builder.call(tree_funcs[0], args)\n for func in tree_funcs[1:]:\n # could be inlined, but optimizer does for us\n tree_res = builder.call(func, args)\n res = builder.fadd(tree_res, res)\n ptr = builder.gep(out_arr, (loop_iter_reg,))\n builder.store(res, ptr)\n tmpp1 = builder.add(loop_iter_reg, iconst(1))\n builder.store(tmpp1, loop_iter)\n builder.branch(condition_block)\n # -- END CORE LOOP BLOCK\n\n # -- TERMINAL BLOCK\n ir.IRBuilder(term_block).ret_void()\n # -- END TERMINAL BLOCK", "def 
generate_all_activations(self):\n\n for concept in self.concepts:\n self.generate_activation(self.layers, concept)", "def run_all(self):\n # print(\"running all nodes\")\n executed = set()\n node_update_states = {node: node.block_updates for node in self.flow_view.node_items}\n\n def traverse_upwards(node):\n # Traverse upwards to the top of data flow graph\n if node in executed:\n return\n for port in node.inputs:\n for connection in port.connections:\n traverse_upwards(connection.out.node)\n # print(\"executing\", node)\n node.update_event()\n executed.add(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = True\n\n for node in self.flow_view.node_items:\n traverse_upwards(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = node_update_states[node]\n # print(\"All nodes executed\")", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def main():\n\n print('Breast Cancer')\n cancer = Org('Data/breast-cancer-wisconsin.data', [-1], -1, [-1])\n df = cancer.open()\n # ##NN(file, number hidden layers, number hidden nodes per layer)\n NeuralNet(df, 2, 13, 'classification')\n\n #print('glass')\n #glass = Org('Data/glass.data', [-1], -1, [-1])\n #df = glass.open()\n #NeuralNet(df, 2, 6, 'classification')\n\n #print('soybean')\n #soybean = Org('Data/soybean-small.data', [-1], -1, [-1])\n #df = soybean.open()\n #NeuralNet(df, 2, 11, 'classification')\n\n #print('abalone')\n #abalone = Org('Data/abalone.data', [-1], -1, [0])\n #df = abalone.open()\n #NeuralNet(df, 2, 2, 'regression')\n\n #print('machine')\n #machine = Org('Data/machine.data', [-1], -1, [-1])\n #df = machine.open()\n #NeuralNet(df, 2, 3, 'regression')\n #print(df)\n\n #print('forest')\n #forest = Org('Data/forestfires.data', [0], -1, [2,3])\n #df = forest.open()\n #NeuralNet(df, 0, 3, 'regression')", "def do_recursions(self):\n for iteration in range(self.iterations):\n self.features = self.do_a_recursion()", "def do_recursions(self):\n for _ in range(self.iterations):\n self.features = self.do_a_recursion()", "def train(self):\n for x, y in self.get_data_and_monitor(self):\n graph = self.run(x, y)\n graph.backprop()\n graph.step(self.learning_rate)", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 
100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def run(self):\r\n if self.parameters_valid == False:\r\n print(\"Error: The run() method cannot be excuted with invalid parameters. \\nPlease check the parameters first.\\n\")\r\n return \r\n \r\n for g in range(self.max_generations):\r\n\r\n fitness = self.calculate_fitness()\r\n parents = self.rank_selection(fitness, self.parents_num)\r\n offspring_crossover = self.crossover(parents, offspring_size=(self.offspring_num, 5))\r\n offspring_mutation = self.mutation(offspring_crossover)\r\n offspring_re = self.rearrange(offspring_mutation, offspring_size=(self.offspring_num, 5))\r\n\r\n # Create a new generation based on the selected parents and the offspring\r\n self.population[0:parents.shape[0], :] = parents\r\n self.population[parents.shape[0]:, :] = offspring_re\r\n\r\n # All the run() methods are done.\r\n self.run_completed = True", "def main():\n\n print('Breast Cancer')\n print(\"generations: 5 \\n\"\n \"pop = 25 \\n\"\n \"ts = 4 \\n\"\n \"weight = 4 \\n\"\n \"mutations = 10\")\n #print(\"generations: 40 \\n\"\n # \"beta = 1.7 \\n\"\n # \"pop = 25 \\n\"\n # \"pr = .7\")\n cancer = Org('Data/breast-cancer-wisconsin.data', [-1], -1, [-1])\n df = cancer.open()\n # ##NN(file, number hidden layers, number hidden nodes per layer)\n NeuralNet(df, 0, 12, 'classification', 'DE', 20)\n\n #print('glass')\n #glass = Org('Data/glass.data', [-1], -1, [-1])\n #df = glass.open()\n #NeuralNet(df, 2, 6, 'classification', \"DE\", 5)\n\n #print('soybean')\n #soybean = Org('Data/soybean-small.data', [-1], -1, [-1])\n #df = soybean.open()\n #NeuralNet(df, 0, 17, 'classification', \"DE\", 30)\n\n #print('abalone')\n #print(\"generations: 20 \\n\"\n # \"pop = 25 \\n\"\n # \"ts = 4 \\n\"\n # \"weight = 4 \\n\"\n # \"mutations = 30\")\n #abalone = Org('Data/abalone.data', [-1], -1, [0])\n #df = abalone.open()\n #NeuralNet(df, 2, 1, 'regression', 'GA', 25)\n\n print('machine')\n machine = Org('Data/machine.data', [-1], -1, [-1])\n df = machine.open()\n NeuralNet(df, 2, 3, 'regression')\n print(df)\n\n #print('forest')\n #print(\"generations: 20 \\n\"\n # \"pop = 25 \\n\"\n # \"ts = 4 \\n\"\n # \"weight = 4 \\n\"\n # \"mutations = 30\")\n #forest = Org('Data/forestfires.data', [0], -1, [-1])\n #df = forest.open()\n #NeuralNet(df, 0, 3, 'regression', 'BP', 10)", "def execute(self):\n monitor = self.monitor(self.core_task.__name__)\n monitor.oqparam = oq = self.oqparam\n self.src_filter = SourceFilter(self.sitecol, oq.maximum_distance)\n self.nsites = []\n acc = AccumDict({\n grp_id: ProbabilityMap(len(oq.imtls.array), len(gsims))\n for grp_id, gsims in self.gsims_by_grp.items()})\n acc.calc_times = {}\n acc.eff_ruptures = AccumDict() # grp_id -> eff_ruptures\n acc.bb_dict = {} # just for API compatibility\n param = dict(imtls=oq.imtls, truncation_level=oq.truncation_level,\n filter_distance=oq.filter_distance)\n for sm in self.csm.source_models: # one branch at the time\n grp_id = sm.ordinal\n gsims = self.gsims_by_grp[grp_id]\n [[ucerf_source]] = sm.src_groups\n ucerf_source.nsites = 
len(self.sitecol)\n self.csm.infos[ucerf_source.source_id] = source.SourceInfo(\n ucerf_source)\n ct = self.oqparam.concurrent_tasks or 1\n\n # parallelize by rupture subsets\n rup_sets = numpy.arange(ucerf_source.num_ruptures)\n taskname = 'ucerf_classical_%d' % grp_id\n acc = parallel.Starmap.apply(\n ucerf_classical,\n (rup_sets, ucerf_source, self.src_filter, gsims, monitor),\n concurrent_tasks=ct, name=taskname\n ).reduce(self.agg_dicts, acc)\n\n # parallelize on the background sources, small tasks\n bckgnd_sources = ucerf_source.get_background_sources(\n self.src_filter)\n args = (bckgnd_sources, self.src_filter, gsims, param, monitor)\n bg_res = parallel.Starmap.apply(\n classical, args, name='background_sources_%d' % grp_id,\n concurrent_tasks=ct)\n # compose probabilities from background sources\n for pmap in bg_res:\n acc[grp_id] |= pmap[grp_id]\n\n with self.monitor('store source_info', autoflush=True):\n self.store_source_info(self.csm.infos, acc)\n return acc # {grp_id: pmap}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just mutating every forest in the collection.
def mutate(self):
    for forest in self._forests:
        forest.mutate(self._fullInput)
[ "def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()", "def update(self):\n map(lambda x: x.update(), self._children.values())", "def reset(self):\n for index in self.values():\n index.reset()\n self.objectids = self.family.IF.TreeSet()", "def reset(self):\n self._trees = []", "def update (self) :\n for met in self.gene :\n met(self)", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def set_features(self, features):\r\n for var in self.features:\r\n self.remove_feature(var)\r\n\r\n for var in features:\r\n self.add_feature(var)", "def _set_node_lists(self, new):\n for edge in self.edges:\n edge._nodes = self.nodes", "def updateCollection():\n \n cl.updColletion()", "def update_collectors(self):\n for collector in self.collectors:\n collector.update()", "def leaf_modify(self, func):\n for key, value in self.leaf_items():\n self[key] = func(value)", "def update(self, iterable):\n self._update_nodes(iterable)", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def superstep(self):\n for vertex in self.vertices:\n vertex.update()", "def reset(self):\n\n redis_client.delete(self.name)\n current_members = ColorChoice.objects.filter(self.predicate)\n for member in current_members:\n self.update(member)", "def _uncross_reference_sets(self) -> None:\n for set_obj in self.asets:\n set_obj.uncross_reference()\n for set_obj in self.omits:\n set_obj.uncross_reference()\n for set_obj in self.bsets:\n set_obj.uncross_reference()\n for set_obj in self.csets:\n set_obj.uncross_reference()\n for set_obj in self.qsets:\n set_obj.uncross_reference()\n for unused_name, set_objs in self.usets.items():\n for set_obj in set_objs:\n set_obj.uncross_reference()\n\n # superelements\n for unused_key, set_obj in self.se_sets.items():\n set_obj.uncross_reference()\n for set_obj in self.se_bsets:\n set_obj.uncross_reference()\n for set_obj in self.se_csets:\n set_obj.uncross_reference()\n for set_obj in self.se_qsets:\n set_obj.uncross_reference()\n for set_obj in self.se_usets:\n set_obj.uncross_reference()", "def assign_all_materials(self):\r\n flat = self.flatten()\r\n for comp in flat.values():\r\n comp.assign_materials_database()", "def _update_children_lst(self, first: Block, second: Block, third: Block,\n fourth: Block) -> None:\n copy1 = first.create_copy()\n copy2 = second.create_copy()\n copy3 = third.create_copy()\n copy4 = fourth.create_copy()\n self.children[0] = copy1\n self.children[1] = copy2\n self.children[2] = copy3\n self.children[3] = copy4", "def regenerate_collection_string_from_trees( self ):\n self.source = self.get_collection_string()\n self.save( collection_changed = True )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SGL query of a traffic sensor. See query_ensor() for sensorURI, fromTime and toTime.
def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False):
    values = ["count", "sumSpeed"]
    result = None
    for v in values:
        # data = query_ensor(sensorURI, fromTime, toTime, v)
        data = multiday_query(sensorURI, fromTime, toTime, v)
        df = pd.DataFrame(data, columns=["measuredTime", v])
        df["measuredTime"] = pd.to_datetime(df["measuredTime"])
        df.index = df["measuredTime"]
        del df["measuredTime"]

        if remove_outliers:
            z_scores = np.abs(stats.zscore(df))
            print(f"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}")
            df = df[(z_scores < 3).all(axis=1)]

        if resampleFreq is not None:
            df = df.resample(resampleFreq).sum()

        if result is not None:
            result = pd.merge_ordered(result, df, left_on="measuredTime", right_on="measuredTime")
            result.index = result["measuredTime"]
            del result["measuredTime"]
        else:
            result = df

    # avg speed
    result["avgSpeed"] = result["sumSpeed"] / result["count"]
    result.loc[~np.isfinite(result["avgSpeed"]), "avgSpeed"] = np.nan
    result["avgSpeed"] = result["avgSpeed"].interpolate()
    return result
[ "def sensor():\n\n return Sensors(TERRAREF_BASE, 'station1', 'lv1_sensor1',\n stations=STATIONS)", "def requestSensorData(self):\n self._sendSerialMessage('GET_SENSOR', [])", "def read_sensor_wf(table, evt, isens):\n return (table.read_where(\"(event=={}) & (ID=={})\".format(evt, isens),\n field=\"time_mus\"),\n table.read_where(\"(event=={}) & (ID=={})\".format(evt, isens),\n field=\"ene_pes\"))", "def read_sensors():\n previous_time = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n delta = now - previous_time\n if delta.seconds >= sample_frequency:\n previous_time = now\n \n # Read SGP30.\n eCO2_data = sgp30.eCO2\n tvoc_data = sgp30.TVOC\n\n # Read VEML6070 and VEML7700, sample ten times.\n for j in range(10):\n light_data = light.lux\n uv_raw = uv.uv_raw\n uv_data = uv.get_index(uv_raw)\n\n # Read BME280.\n temp_data = bme280.temperature\n # Convert temperature (C->F)\n temp_data = temp_data * 1.8 + 32\n humid_data = bme280.humidity\n pressure_data = bme280.pressure\n\n # Write to database\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n curs.execute(\"INSERT INTO data values(?, ?, ?, ?, ?, ?, ?, ?)\",\n (now, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data,\n light_data, uv_data))\n conn.commit()\n conn.close()", "async def list_sensors(self) -> ListSensorsT:\n discovery_item = self.vapix.api_discovery[self.api_id.value]\n return await self.vapix.request2(ListSensorsRequest(discovery_item.version))", "def sensor_history(self, sensor_name, start_time_sec, end_time_sec,\n include_value_ts=False, timeout_sec=0):\n\n if timeout_sec != 0:\n self._logger.warn(\n \"timeout_sec is no longer supported. Default tornado timeout is used\")\n\n params = {\n 'sensor': sensor_name,\n 'start_time': start_time_sec,\n 'end_time': end_time_sec,\n 'limit': MAX_SAMPLES_PER_HISTORY_QUERY,\n 'include_value_time': include_value_ts\n }\n\n url = url_concat(\n (yield self.get_sitemap())['historic_sensor_values'] + '/query', params)\n self._logger.debug(\"Sensor history request: %s\", url)\n response = yield self._http_client.fetch(url)\n data_json = json.loads(response.body)\n if 'data' not in data_json:\n raise SensorHistoryRequestError(\"Error requesting sensor history: {}\"\n .format(response.body))\n data = []\n for item in data_json['data']:\n if 'value_time' in item:\n sample = SensorSampleValueTime(item['sample_time'],\n item['value_time'],\n item['value'],\n item['status'])\n else:\n sample = SensorSample(item['sample_time'],\n item['value'],\n item['status'])\n data.append(sample)\n result = sorted(data, key=_sort_by_sample_time)\n raise tornado.gen.Return(result)", "def getSensors(self, args: str = '') -> Dict[str, Any]:\n demisto.debug('SensorClient.getSensors method has been called.')\n\n return self._http_request(\n method='GET',\n url_suffix='sensors' + args\n )", "def GetSensors(self, x, y, w):\n\n SL = 0\n SR = 0\n Range = 25.0 # Sensor range\n\n for Ob in self.Obs:\n x2 = Ob['x']\n y2 = Ob['y']\n\n # Find the shortest x distance on torus\n if abs(x2 + self.xmax - x) < abs(x2 - x):\n x2 = x2 + self.xmax\n elif abs(x2 - self.xmax - x) < abs(x2 - x):\n x2 = x2 - self.xmax\n\n # Find shortest y distance on torus\n if abs(y2 + self.ymax - y) < abs(y2 - y):\n y2 = y2 + self.ymax\n elif abs(y2 - self.ymax - y) < abs(y2 - y):\n y2 = y2 - self.ymax\n\n dx = x2 - x\n dy = y2 - y\n\n z = np.sqrt(dx**2 + dy**2)\n\n if z < Range:\n v = np.arctan2(dy, dx)\n if v < 0:\n v = 2*np.pi + v\n\n dw = v - w # angle difference between robot's heading and object\n\n # Stimulus 
strength depends on distnace to object boundary\n S = (Range - z)/Range\n\n if ((dw >= np.pi/8 and dw < np.pi/2) or\n (dw < -1.5*np.pi and dw >= -2*np.pi+np.pi/8)):\n SL = max(S, SL)\n # SL += S\n elif ((dw > 1.5*np.pi and dw <= 2*np.pi - np.pi/8) or\n (dw <= -np.pi/8 and dw > -np.pi/2)):\n SR = max(S, SR)\n # SR += S\n\n return SL, SR", "async def get_sensor_info(self):\n self.sensors = await self.api_get('/aircon/get_sensor_info')", "def get_sensors(self):\n # Use the select function to get all the sensors (effectively a SQL\n # SELECT * FROM... query).\n return DHTSensor.select()", "def wolfram_gps_query(given_stop): \n \n location = ast.literal_eval(given_stop[\"location\"])\n latti = location[0]\n longi = -1 * location[1]\n \n print latti, longi\n \n results = wolfram_client.query(str(latti) + ' deg N, ' + str(longi) + ' deg W')\n \n print results", "def get_sondes(client, start, end):\n\n sonde_query_str = \"SELECT * FROM cfog.sharp_radiosonde \" + \\\n f\"WHERE LaunchTime BETWEEN '{start}' AND '{end}' \" + \\\n \"ORDER BY LaunchTime ASC\"\n\n print(f\"Executing bigquery query string: \")\n print(sonde_query_str + '\\n')\n\n sonde_data = {f\"{s['LaunchTime'].strftime('%m-%d_%H')}\":s for s in client.query(query=sonde_query_str)}\n\n print(\"Radiosondes obtained within the queried time bounds: \")\n print(list(sonde_data))\n\n sonde_data_out = {}\n for t in sonde_data:\n # ignored col: SoundingIdPk, RadioRxTimePk, PtuStatus\n sonde_data_out[t] = {}\n sonde_data_out[t]['df'] = pd.DataFrame({\n 'DataSrvTime' : sonde_data[t]['DataSrvTime'],\n 'Pressure' : sonde_data[t]['Pressure'],\n 'Temperature' : sonde_data[t]['Temperature'],\n 'Humidity' : sonde_data[t]['Humidity'],\n 'WindDir' : sonde_data[t]['WindDir'],\n 'WindSpeed' : sonde_data[t]['WindSpeed'],\n 'WindNorth' : sonde_data[t]['WindNorth'],\n 'WindEast' : sonde_data[t]['WindEast'],\n 'Height' : sonde_data[t]['Height'],\n 'WindInterpolated' : sonde_data[t]['WindInterpolated'],\n 'Latitude' : sonde_data[t]['Latitude'],\n 'Longitude' : sonde_data[t]['Longitude'],\n 'North' : sonde_data[t]['North'],\n 'East' : sonde_data[t]['East'],\n 'Up' : sonde_data[t]['Up'],\n 'Altitude' : sonde_data[t]['Altitude'],\n 'Dropping' : sonde_data[t]['Dropping']\n }\n )\n sonde_data_out[t]['LaunchTime'] = sonde_data[t]['LaunchTime']\n sonde_data_out[t]['LaunchLatitude'] = sonde_data[t]['LaunchLatitude']\n sonde_data_out[t]['LaunchLongitude'] = sonde_data[t]['LaunchLongitude']\n\n print(f\"Query complete. 
Total number of data entries: {len(sonde_data_out)}.\\n\\n\")\n\n del sonde_data\n return sonde_data_out", "def get_data(last):\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res", "def getSensors(self, sensor_vals):\n distanceShort = 3\n distanceLong = 5\n r, c = self.center\n # sensor_vals = [FL_SR, FC_SR, FR_SR, RT_SR, RB_LR, LT_LR]\n\n # Front Left\n if self.direction == NORTH:\n self.getValue(zip(range(r-distanceShort-1, r-1), [c-1]*distanceShort)[::-1],\n sensor_vals[0], distanceShort, True)\n elif self.direction == EAST:\n self.getValue(zip([r-1]*distanceShort, range(c+2, c+distanceShort+2)),\n sensor_vals[0], distanceShort, True)\n elif self.direction == WEST:\n self.getValue(zip([r+1]*distanceShort, range(c-distanceShort-1, c-1))[::-1],\n sensor_vals[0], distanceShort, True)\n else:\n self.getValue(zip(range(r+2, r+distanceShort+2), [c+1]*distanceShort),\n sensor_vals[0], distanceShort, True)\n\n # Front Center\n if self.direction == NORTH:\n self.getValue(zip(range(r-distanceShort-1, r-1), [c]*distanceShort)[::-1],\n sensor_vals[1], distanceShort, True)\n elif self.direction == EAST:\n self.getValue(zip([r]*distanceShort, range(c+2, c+distanceShort+2)),\n sensor_vals[1], distanceShort, True)\n elif self.direction == WEST:\n self.getValue(zip([r]*distanceShort, range(c-distanceShort-1, c-1))[::-1],\n sensor_vals[1], distanceShort, True)\n else:\n self.getValue(zip(range(r+2, r+distanceShort+2), [c]*distanceShort),\n sensor_vals[1], distanceShort, True)\n\n # Front Right\n if self.direction == NORTH:\n self.getValue(zip(range(r-distanceShort-1, r-1), [c+1]*distanceShort)[::-1],\n sensor_vals[2], distanceShort, True)\n elif self.direction == EAST:\n self.getValue(zip([r+1]*distanceShort, range(c+2, c+distanceShort+2)),\n sensor_vals[2], distanceShort, True)\n elif self.direction == WEST:\n self.getValue(zip([r-1]*distanceShort, range(c-distanceShort-1, c-1))[::-1],\n sensor_vals[2], distanceShort, True)\n else:\n self.getValue(zip(range(r+2, r+distanceShort+2), [c-1]*distanceShort),\n sensor_vals[2], distanceShort, True)\n\n # Right Top\n if self.direction == NORTH:\n self.getValue(zip([r-1]*distanceShort, range(c+2, c+distanceShort+2)),\n sensor_vals[3], distanceShort, True, True)\n elif self.direction == EAST:\n self.getValue(zip(range(r+2, r+distanceShort+2), [c+1]*distanceShort),\n sensor_vals[3], distanceShort, 
True, True)\n elif self.direction == WEST:\n self.getValue(zip(range(r-distanceShort-1, r-1), [c-1]*distanceShort)[::-1],\n sensor_vals[3], distanceShort, True, True)\n else:\n self.getValue(zip([r+1]*distanceShort, range(c-distanceShort-1, c-1))[::-1],\n sensor_vals[3], distanceShort, True, True)\n\n # Right Bottom\n if self.direction == NORTH:\n self.getValue(zip([r+1]*distanceLong, range(c+2, c+distanceLong+2)),\n sensor_vals[4], distanceLong, False)\n elif self.direction == EAST:\n self.getValue(zip(range(r+2, r+distanceLong+2), [c-1]*distanceLong),\n sensor_vals[4], distanceLong, False)\n elif self.direction == WEST:\n self.getValue(zip(range(r-distanceLong-1, r-1), [c+1]*distanceLong)[::-1],\n sensor_vals[4], distanceLong, False)\n else:\n self.getValue(zip([r-1]*distanceLong, range(c-distanceLong-1, c-1))[::-1],\n sensor_vals[4], distanceLong, False)\n\n # Left Top\n if self.direction == NORTH:\n self.getValue(zip([r-1]*distanceLong, range(c-distanceLong-1, c-1))[::-1],\n sensor_vals[5], distanceLong, False)\n elif self.direction == EAST:\n self.getValue(zip(range(r-distanceLong-1, r-1), [c+1]*distanceLong)[::-1],\n sensor_vals[5], distanceLong, False)\n elif self.direction == WEST:\n self.getValue(zip(range(r+2, r+distanceLong+2), [c-1]*distanceLong),\n sensor_vals[5], distanceLong, False)\n else:\n self.getValue(zip([r+1]*distanceLong, range(c+2, c+distanceLong+2)),\n sensor_vals[5], distanceLong, False)", "def _poll_sensors(conn, cursor):\n conn, c = _get_db_connection()\n\n motion_reading = catnanny.motionsensor()\n temp_reading = catnanny.tempreading()\n\n current_timestamp = datetime.now().isoformat()\n # insert a timestamp, the word motion, and the output from catnanny.motionsensor into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'motion', motion_reading))\n # insert a timestamp, the word temperature, and the output from catnanny.tempreading into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'temperature', temp_reading))\n\n conn.commit()", "def fetch_interval(self,from_day,to_day,sensor=0):\n Data=Dataset(\"Sensor\"+str(sensor))\n while from_day <= to_day:\n self.fetch(d,sensor)\n from_day= from_day+ timedelta(days=1)", "def _read_sensors(self):\n\n # Read differents sensors\n for s in self._sensors_to_read:\n\n if s == 'a':\n # Accelerometer sensor in a non filtered way\n if self._accelerometer_filtered:\n parameters = ('A', 12, '@III')\n\n else:\n parameters = ('a', 6, '@HHH')\n\n self._debug('WARNING: Accelerometer not yet implemented!')\n\n elif s == 'n':\n # Proximity sensors\n res, prox = vrep.simxGetStringSignal(self._clientID, 'EPUCK_proxSens', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Proximity sensors readout failed: \", res)\n else:\n proxVals = vrep.simxUnpackFloats(prox)\n # TODO: Find out actual needed scaling factor\n proxVals = [int(x * 1000) for x in proxVals]\n self._proximity = tuple(proxVals)\n\n elif s == 'm':\n # Floor sensors\n res, floor1 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_0', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 1 sensor readout failed: \", res)\n res, floor2 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_1', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 2 sensor readout failed: \", res)\n res, floor3 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_2', 
vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 3 sensor readout failed: \", res)\n # Scale returned values to mimic real robot; current factor is just guessed\n self._floor_sensors = (floor1 * 1800, floor2 * 1800, floor3 * 1800)\n\n elif s == 'q':\n # Motor position sensor\n # First: Get the handles of both motor joints\n res, leftMotor = vrep.simxGetObjectHandle(self._clientID, 'ePuck_leftJoint',\n vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Unable to get handle of left motor: \", res)\n continue\n res, rightMotor = vrep.simxGetObjectHandle(self._clientID, 'ePuck_rightJoint',\n vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Unable to get handle of right motor: \", res)\n continue\n\n # Second: Get the actual motor position (in radians)\n res, leftPos = vrep.simxGetJointPosition(self._clientID, leftMotor, vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Readout of left motor failed: \", res)\n continue\n res, rightPos = vrep.simxGetJointPosition(self._clientID, rightMotor, vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Readout of left motor failed: \", res)\n continue\n\n self._motor_position = (leftPos, rightPos)\n\n elif s == 'o':\n # Light sensors\n parameters = ('O', 16, '@HHHHHHHH')\n self._debug('WARNING: Light sensors not yet implemented!')\n\n elif s == 'u':\n # Microphone\n parameters = ('u', 6, '@HHH')\n self._debug('WARNING: Microphones not yet implemented!')\n\n elif s == 'e':\n # Motor Speed\n parameters = ('E', 4, '@HH')\n self._debug('WARNING: Motor speed not yet implemented!')\n\n elif s == 'i':\n # Do nothing for the camera, is an independent process\n pass\n\n else:\n self._debug('Unknow type of sensor to read')", "def uerra_eswi_request(requestDates, target, var, model):\n server.retrieve({\n \"class\": \"ur\", \n \"dataset\": \"uerra\",\n \"date\": requestDates,\n \"expver\": \"prod\",\n \"levtype\": \"sfc\",\n \"origin\": gcm_code, # eswi is Harmonie / egrr is UM\n \"param\": var, \n \"number\": str(model), #\"0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20\",\n \"step\": \"1/2/3/4/5/6\",\n \"stream\": type_of_model, # enda for COSMO, oper for Harmonie, UM\n \"time\": \"00/06/12/18\",\n \"type\": 'fc', \n \"target\": target\n })", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot time points given in the data file and compare to x^3
def plot_data(fname):
    if not os.path.isfile(fname):
        print('No data has been generated yet, aborting...')
        sys.exit(1)

    with open(fname, 'r') as fd:
        data = json.load(fd)

    x = np.arange(0, max(data, key=lambda e: e[0])[0], 1)
    const = .55e-8
    func = lambda x: const * x**3

    plt.plot(
        *zip(*data),
        label=r'ShRec3D data points',
        linestyle='None', marker='h'
    )
    plt.plot(x, func(x), label=r'$ %.0e \cdot x^3$' % const)

    plt.title(r'Complexity ($\in \Theta\left(x^3\right)$) visualization of ShRec3D')
    plt.xlabel('loci number')
    plt.ylabel('execution time (seconds)')
    plt.legend(loc='best')

    plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()
[ "def visualize_time_data(time, index=None):\n length = time[0].shape[0]\n index = np.array(index or range(length))\n time_x = time[0].iloc[index]\n time_y = time[1].iloc[index]\n for key,val in eval(CFGS[\"DATA\"][\"PLOTTIMECOL\"]).items():\n for i in range(val[0], val[1]+1):\n idx = time_x[key] == i\n plt.plot(index[idx], time_y[idx], \"--\")\n plt.title(key)\n # plt.legend([\"springer\", \"summer\", \"fall\", \"winter\"])\n plt.show()", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n #nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()", "def load_and_plot_data(fname):\n data = np.loadtxt(fname)\n tgt_sleep = data[:, 0]\n measured_sleep = data[:, 1]\n plot_data(tgt_sleep, measured_sleep)", "def test_plot_time_data():\n fig, ax = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)\n return fig", "def time_series_plot(ax,data):\n #plot options\n width = 0.015\n linewidth = 0.1\n pcolor_plot = True\n \n # read main inputs\n xx = 
data['xx'][:]\n val = data['val'][:]\n #\n var = data['var']\n lim = data['lim']\n #\n if 'xlab' in data.keys():\n xlab = data['xlab']\n else:\n ax.set_xlabel('X[m]')\n \n if 'xlab' in data.keys():\n title = data['title']\n else:\n title = ' '\n #\n #if not isinstance(xx[0], datetime.datetime.date):\n # xmin = lim['xmin']\n # xmax = lim['xmax']\n # ax.set_xlim(xmin,xmax)\n \n ax.plot(xx,val ,'k-o',label=var['label'])\n #plt.plot(x2[0],data,'k-o',label='data' )\n leg=ax.legend(loc='best')\n try:\n frame=leg.get_frame()\n frame.set_edgecolor('None')\n frame.set_facecolor('None')\n except:\n pass\n ax.set_title(title)\n\n # the linewidth of the rectangular axis frame\n fr_linewidth=0.4\n [i.set_linewidth(fr_linewidth) for i in ax.spines.itervalues()]\n \n #ax.xaxis.set_ticks(ticks=range(len(point_list))) \n #ax.xaxis.set_ticklabels(ticklabels=point_list) #,fontsize=18)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_rotation(30)\n\n ax.set_ylim(var['vmin'],var['vmax'])\n \n ax.set_ylabel(var['label']) \n \n ax.grid('off')\n #if not ax.is_last_row():\n # plt.setp( ax, 'xticklabels', [] )\n # ax.set_xlabel('')\n #if not plt.gca().is_first_col():\n # plt.setp( plt.gca(), 'yticklabels', [] )\n # plt.ylabel('') \n #print ' > plot line'\n return", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith 
Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False", "def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()", "def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()", "def plotData(i):\n global xs, ys \n data = readFromSerial()\n try:\n #if len(data) > 1:\n # Extract the element from the list\n strToSplit = data[0]\n # Separate the time and sensor data\n x, y = strToSplit.split(',')\n\n # The data must me plotted as float not as a string!\n xs.append(float(x))\n ys.append(float(y))\n\n # Clean everything before we plot\n ax1.clear()\n\n # Had to set the names here because in the initialization they would not be permanent \n ax1.set_xlabel('Time (ms)')\n ax1.set_ylabel('Sensor')\n ax1.plot(xs,ys)\n except(ValueError):\n print(\"Retrieved the data in a wrong manner. 
Run it one more time\")\n sys.exit(0)", "def plotTime(data,rate):\n t = np.arange(len(data))*1.0/rate\n \n #Plot time domain\n pl.plot(t, data)\n pl.ylabel(\"Amplitude\")\n pl.xlabel(\"Time(s)\")\n pl.show()", "def plot_time_delay_3d(x0, delta_n):\n plt.figure(figsize=(5, 5))\n points_len = x0.shape[0]\n # shift the data\n x0_t = x0[:points_len - 2 * delta_n]\n x0_t_delta_t = x0[delta_n: points_len - delta_n]\n x0_t_delta_2t = x0[delta_n * 2: points_len]\n\n fig = plt.figure(figsize=(7, 7))\n ax = fig.gca(projection='3d')\n\n ax.plot(x0_t, x0_t_delta_t, x0_t_delta_2t, linewidth=0.5,\n label=\"time delay = {0}\".format(delta_n))\n ax.legend()\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n plt.show()", "def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()", "def plot_data(self):", "def plot_data(self, axis):\n alpha = 0.5\n if 'c3' in self.nbd_sites:\n axis.plot(self.options.tspan, self.nbd_avgs[0], 'r.',\n label='c3 data', alpha=alpha)\n if 'c62' in self.nbd_sites:\n axis.plot(self.options.tspan, self.nbd_avgs[1], 'g.',\n label='c62 data', alpha=alpha)\n #plt.plot(nbd.time_other, nbd_avgs[2], 'b.', label='c120 data',\n # alpha=alpha)\n #plt.plot(nbd.time_other, nbd_avgs[3], 'm.', label='c122 data',\n # alpha=alpha)\n #plt.plot(nbd.time_other, nbd_avgs[4], 'k.', label='c126 data',\n # alpha=alpha)", "def visualize_3dim_state(time, tv, pca_x):\n ti = np.argmax(tv >= time)\n ax = plt.figure(figsize=(10,10)).add_subplot(111, projection='3d');\n ax.scatter(pca_x[0][ti], pca_x[1][ti], pca_x[2][ti], c='red');\n plt.plot(pca_x[0][:ti], pca_x[1][:ti], pca_x[2][:ti]);\n ax.set_xlabel('PCA 1')\n ax.set_ylabel('PCA 2')\n ax.set_zlabel('PCA 3')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push the item onto the front of the deque.
def enqueue_front(self, item):
    self._items.insert(0, item)
[ "def push_front(self, val):\r\n self.deque.insert(0, val)", "def push_front(self, item):\n self.list.prepend(item)", "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")", "def insertFront(self, value):\n if self.isFull():\n return False\n self.deque.insert(0,value)\n self.length += 1\n return True", "def insert_front(self, value):\r\n\r\n self._front = _DequeNode(value, _prev=self._front,_next=self._front)\r\n if self._count > 0:\r\n self._front._prev._next = self._front\r\n else: \r\n self._rear = self._front\r\n\r\n self._count += 1\r\n \r\n\r\n return", "def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1", "def append_front(self, item):\n\n self.front = Node(item, self.front)", "def add_to_front(self, val):\n pass", "def addFront(self, item):\n self.items.append(item)", "def enqueue(self, value):\n old_back = self.back\n self.back = Node(value)\n if self.size() > 0: # if any Nodes: set back previous to current Node\n old_back.point_previous = self.back\n else: # adding to an empty, than define front\n self.front = self.back\n self.count += 1", "def push_front(self, val):\n new_node = Node(val, self.head)\n if self.is_empty():\n self.tail = new_node\n self.head = new_node\n self.size += 1", "def push_back(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_back()\n self.back_+=1\n self.data_[self.back_]= e\n self.size_+=1\n #print(\"case 1\")\n elif (self.front_ == -1 and self.size_==0):#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_=0\n self.data_[self.back_]= e\n self.size_+=1\n else:#The Back is not at the first index(possibly somewhere in between) and if we push back it we have to go up by one to move to the new back\n self.back_+=1\n self.data_[self.back_] =e \n self.size_+=1", "def left_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_next(self.head)\n\n # if the deque is empty, the new item is the tail\n if not self.tail:\n self.tail = item_to_add\n else:\n # connect the old head to the new head\n self.head.set_prev(item_to_add)\n\n # set the new node as the head\n self.head = item_to_add\n self.size += 1", "def push_front(self, value):\n new_node = self.Node(value)\n\n # Edge Case : List is empty\n if self._size == 0:\n self._tail = new_node\n self._head = new_node\n self._size += 1\n return\n\n new_node.next = self._head\n self._head.prev = new_node\n self._head = new_node\n self._size += 1", "def push_front(self, val : Any):\n\n if (self.size == self.capacity):\n raise IndexError(\"Capacity of LinkedList_Array has been exceeded\")\n\n for i in range (self.size,0,-1):\n self.array[i] = 
self.array[i-1]\n \n self.array[0] = val\n self.size += 1", "def insertFront(self, value: int) -> bool:\n if self.curSize == self.maxSize:\n return False\n self.deque = [value] + self.deque\n self.curSize += 1\n return True", "def enqueue(self, val):\n self.list.insert_last(val)", "def insert_front(self, item):\n if ((not self.item_matches_vector_type(item)) and (self.typesafe == True)):\n raise TypeError(\"An item was added to the vector with an incompatible type\")\n else:\n temp = Node(item)\n temp.next = self.sentinel.next\n temp.prev = self.sentinel\n self.sentinel.next.prev = temp\n self.sentinel.next = temp\n self.size += 1", "def push(self, item):\n # TODO: Push given item\n # So we know that the push method is to insert an item at the top of the stack and we know the top of a linked\n # list is at the head that is why they call it the head therefore we can insert at index 0 or rather what is\n # easier what if prepend to the list\n self.list.prepend(item)\n # The reason we do this over inserting at index 0 is due to the fact that we want to be able to just easily put it\n # in the beginning of the list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pop the item at the front of the deque. Raise IndexError if the deque is empty.
def dequeue_front(self):
    try:
        return self._items.pop(0)
    except:
        raise IndexError('The deque is empty')
[ "def pop_back(self):\r\n if self.size():\r\n self.deque.pop(-1)\r\n else:\r\n raise IndexError(\"Deque is empty.\")", "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def dequeue_rear(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The deque is empty')", "def remove_front(self):\r\n assert self._front is not None, \"Cannot remove from an empty deque\"\r\n \r\n value = self._front\r\n self._front = self._front._next\r\n \r\n self._count -= 1\r\n if self._count == 0:\r\n self._rear = None\r\n else:\r\n self._front._prev = None\r\n return value._value", "def dequeue(self): \n #if queue is empty\n if self.is_empty():\n raise IndexError(\"Deletion is not Possible Because Queue is Empty\")\n else:\n item = self.items[0]\n del self.items[0] # deleting front element\n return item", "def remove_front(self):\r\n assert self._front is not None, \"Cannot remove from an empty dequeue\"\r\n\r\n value = self._front\r\n \r\n self._front = self._front._prev\r\n self._count -= 1\r\n if self._count == 0:\r\n self._front = None\r\n\r\n return value", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def removeFront(self):\n if self._size == 0:\n raise AttributeError(\"Cannot removeFront from an empty Deque\")\n \n temp = self._front\n self._front = self._front.getPrevious()\n if self._size == 1:\n # removing only item which is the rear as well as the front item\n self._rear = None\n else:\n self._front.setNext(None)\n self._size -= 1\n \n return temp.getData()", "def dequeue(self):\n # TODO: Remove and return front item, if any\n # find top_item\n if self.list.length() != 0:\n front = self.list.get_at_index(0)\n # delete top_item\n self.list.delete(front)\n return front\n else:\n raise ValueError('Queue is Empty')", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop(self):\n current_node = self.container.head\n if current_node is not None:\n while current_node.next_node is not None:\n current_node = current_node.next_node\n if current_node == self.container.head:\n self.container.head = None\n self.length -= 1\n return current_node.value\n else:\n try:\n current_node.prev.next_node = None\n except AttributeError:\n print('The Deque is empty')\n raise AttributeError", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def dequeue(self) -> object:\n # to handle empty queue\n if self.da.is_empty():\n raise QueueException\n\n beginning_index = 0\n beginning_value = self.da.get_at_index(beginning_index)\n self.da.remove_at_index(beginning_index)\n return beginning_value", "def pop_from_deque(self):", "def pop_front(self):\n value = self.front.value\n self.delete_front()\n return value", "def dequeue(self):\n try:\n val = self.front.value\n self.front = self.front.point_previous\n except AttributeError:\n raise AttributeError(u\"Queue is empty\")\n self.count -= 1\n return val", "def pop(self, pos=None):\n if pos is None:\n pos = 0\n if 0 <= pos < self.size:\n 
index = 0\n curr = self.head\n prev = None\n while index != pos:\n prev = curr\n curr = curr.get_next()\n index += 1\n if prev is not None:\n prev.set_next(curr.get_next())\n else:\n self.head = curr.get_next()\n self.size -= 1\n return curr.get_data()\n else:\n raise IndexError('Position {} out of range'.format(pos))", "def dequeue(self): \n if self.is_empty():\n raise self.EmptyError('Queue empty')\n elem = self._queue[self._head] # element to be returned\n self._queue[self._head] = None # garbage collection\n self._head = (self._head + 1) % len(self._queue) # advance head index\n self._size -= 1\n return elem", "def pop(self, index: int) -> object:\n # What if you popped the first item, or the the last in the LL??\n # have to change the front and back\n if index < 0:\n index += self.size\n if index < 0 or index >= self.size or self.size == 0:\n raise IndexError\n\n if index == 0:\n value = self.front.value\n self.delete_front()\n return value\n elif index == self.size - 1:\n value = self.back.value\n self.delete_back()\n return value\n\n curr_node = self.front\n prev_node = self.front\n curr_i = 0\n while curr_node is not None and curr_i < index:\n prev_node = curr_node\n curr_node = curr_node.next_\n curr_i += 1\n\n assert curr_node is not None\n if curr_i == index: # Not necessary\n value = curr_node.value\n prev_node.next_ = curr_node.next_ # or prev_node.next_.next_\n self.size -= 1\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pop the item at the end of the deque. Raise IndexError if the deque is empty.
def dequeue_rear(self):
    try:
        return self._items.pop()
    except:
        raise IndexError('The deque is empty')
[ "def pop_back(self):\r\n if self.size():\r\n self.deque.pop(-1)\r\n else:\r\n raise IndexError(\"Deque is empty.\")", "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def dequeue_front(self):\n try:\n return self._items.pop(0)\n except:\n raise IndexError('The deque is empty')", "def pop(self):\n current_node = self.container.head\n if current_node is not None:\n while current_node.next_node is not None:\n current_node = current_node.next_node\n if current_node == self.container.head:\n self.container.head = None\n self.length -= 1\n return current_node.value\n else:\n try:\n current_node.prev.next_node = None\n except AttributeError:\n print('The Deque is empty')\n raise AttributeError", "def dequeue(self): \n #if queue is empty\n if self.is_empty():\n raise IndexError(\"Deletion is not Possible Because Queue is Empty\")\n else:\n item = self.items[0]\n del self.items[0] # deleting front element\n return item", "def pop(self, pos=None):\n if pos is None:\n pos = 0\n if 0 <= pos < self.size:\n index = 0\n curr = self.head\n prev = None\n while index != pos:\n prev = curr\n curr = curr.get_next()\n index += 1\n if prev is not None:\n prev.set_next(curr.get_next())\n else:\n self.head = curr.get_next()\n self.size -= 1\n return curr.get_data()\n else:\n raise IndexError('Position {} out of range'.format(pos))", "def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))", "def pop(self, pos=None):\n \n if self.is_empty():\n raise IndexError('pop from empty list')\n \n if pos is None:\n pos = self.length() - 1\n \n elif pos >= self.length():\n raise IndexError('pop index out of range')\n \n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n # If the item to be removed is the first item\n if pos == 0:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n \n return current.get_data()", "def pop_from_deque(self):", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def pop(self, index: int = -1) -> Any:\n if not isinstance(index, int):\n raise TypeError(\"Index must be an integer\")\n if index == -1 or index >= self.size:\n returnval = self.elements[self.itemcount - 1]\n else:\n returnval = self.elements[index]\n for i in range(index, self.itemcount):\n self.elements[i] = self.elements[i + 1]\n self.itemcount -= 1\n self.elements[self.itemcount] = None\n return returnval", "def pop(self):\n try:\n if self.size() > 0:\n top = self.top()\n self.items.pop()\n return top\n else:\n raise IndexError('Cannot pop item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def dequeue(self) -> object:\r\n if self.is_empty():\r\n raise QueueException\r\n value = self.da.get_at_index(0)\r\n self.da.remove_at_index(0)\r\n return value", "def pop(self, index: int) -> object:\n # What if you popped the first item, or the the last in 
the LL??\n # have to change the front and back\n if index < 0:\n index += self.size\n if index < 0 or index >= self.size or self.size == 0:\n raise IndexError\n\n if index == 0:\n value = self.front.value\n self.delete_front()\n return value\n elif index == self.size - 1:\n value = self.back.value\n self.delete_back()\n return value\n\n curr_node = self.front\n prev_node = self.front\n curr_i = 0\n while curr_node is not None and curr_i < index:\n prev_node = curr_node\n curr_node = curr_node.next_\n curr_i += 1\n\n assert curr_node is not None\n if curr_i == index: # Not necessary\n value = curr_node.value\n prev_node.next_ = curr_node.next_ # or prev_node.next_.next_\n self.size -= 1\n return value", "def pop_last(self):\n self.pop_item(-1)", "def dequeue(self):\n # TODO: Remove and return front item, if any\n # find top_item\n if self.list.length() != 0:\n front = self.list.get_at_index(0)\n # delete top_item\n self.list.delete(front)\n return front\n else:\n raise ValueError('Queue is Empty')", "def dequeue(self): \n if self.is_empty():\n raise self.EmptyError('Queue empty')\n elem = self._queue[self._head] # element to be returned\n self._queue[self._head] = None # garbage collection\n self._head = (self._head + 1) % len(self._queue) # advance head index\n self._size -= 1\n return elem", "def pop(self, idx=-1):\n to_ret =self. __getitem__(idx)\n self.__delitem__(idx)\n return to_ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an array of full paths for a relative path with globs
def expand_path(__file__, path_with_globs):
    return glob.glob(relative_path(__file__, path_with_globs))
[ "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def recursive_glob(path):\n if \"*\" not in path:\n # Glob isn't needed.\n return [path]\n elif \"**\" not in path:\n # Recursive glob isn't needed.\n return path_utils.glob(path)\n else:\n return path_utils.glob(path, recursive=True)", "def abspath(files):\n\n files = sum([glob.glob(x) for x in files], [])\n return [os.path.abspath(x) for x in files]", "def get_paths(target):\n\n full_path = os.path.abspath(target)\n\n if os.path.isdir(full_path):\n import glob\n file_listing = glob.glob(\"{}/**\".format(full_path), recursive=True)\n file_listing.pop(0)\n return [ this_file for this_file in file_listing if os.path.isfile(this_file) ]\n\n if not os.path.isfile(full_path):\n raise exceptions.CstashCriticalException(message=\"File does not exist\")\n\n return [full_path]", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def _get_file_paths(self, fileglob):\n paths = glob.glob(f'{self.root_dir}**/{fileglob}', recursive=True)\n paths = sorted(paths)\n return paths", "def all_subpaths(path):\n path = os.path.abspath(path)\n paths = [path]\n head, _ = os.path.split(path)\n while head != '/':\n paths.append(head)\n head, _ = os.path.split(head)\n return paths", "def glob_files(path):\n return glob.glob(convert_path(path))", "def glob(self, pattern):\n return map(path, glob.glob(_base(path(self, pattern))))", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def _all_files_relative(path):\n files = []\n for root, _, filenames in os.walk(path):\n for name in filenames:\n r = Path(root, name).relative_to(path)\n if r.suffix == \".swp\":\n continue\n files.append(r)\n return files", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def _relativize_paths(paths):\n return [os.path.relpath(p, _BUILD_DIR) for p in paths]", "def get_all_file_paths(path):\n\n # initializing empty file paths list\n file_paths = []\n\n if isfile(path):\n return path\n\n # crawling through directory and subdirectories\n for root, directories, files in os.walk(path):\n for filename in files:\n # join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath)\n\n # returning all file paths\n return file_paths", "def get_file_paths():\n print(os.getcwd())\n\n filepath = os.getcwd() + '/event_data'\n file_path_list = glob.glob(os.path.join(filepath, '*'))\n\n return file_path_list", "def resolve_wildcards(filenames: list[str]) -> list[Path]:\n # Resolve wildcards.\n names = []\n for n in filenames:\n i = glob.glob(n)\n if i:\n names.extend(i)\n # Remove duplicates\n names = list(set(names))\n\n # Convert strings to paths.\n paths = [Path(n) for n 
in names]\n return paths" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One solution would be to do an inorder traversal and sum the values along the way (or just recursively sum over the tree) => O(N), but when the range [lo, hi] is small this is wasteful.
def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int:
    def visit(node: TreeNode) -> int:
        if not node:
            return 0
        if node.val < lo:
            return visit(node.right)
        elif hi < node.val:
            return visit(node.left)
        else:
            return node.val + visit(node.left) + visit(node.right)

    return visit(root)
[ "def rangeSumBST(self, root: TreeNode, low: int, high: int) -> int:\n self.traverse_path = []\n self.inorder(root)\n return sum(filter(lambda x: low<=x<=high, self.traverse_path))", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def reduce(self, start: int = 0, end: Optional[int] = None) -> Any:\n if end is None:\n end = self.capacity\n elif end < 0:\n end += self.capacity\n\n # Init result with neutral element.\n result = self.neutral_element\n # Map start/end to our actual index space (second half of array).\n start += self.capacity\n end += self.capacity\n\n # Example:\n # internal-array (first half=sums, second half=actual values):\n # 0 1 2 3 | 4 5 6 7\n # - 6 1 5 | 1 0 2 3\n\n # tree.sum(0, 3) = 3\n # internally: start=4, end=7 -> sum values 1 0 2 = 3.\n\n # Iterate over tree starting in the actual-values (second half)\n # section.\n # 1) start=4 is even -> do nothing.\n # 2) end=7 is odd -> end-- -> end=6 -> add value to result: result=2\n # 3) int-divide start and end by 2: start=2, end=3\n # 4) start still smaller end -> iterate once more.\n # 5) start=2 is even -> do nothing.\n # 6) end=3 is odd -> end-- -> end=2 -> add value to result: result=1\n # NOTE: This adds the sum of indices 4 and 5 to the result.\n\n # Iterate as long as start != end.\n while start < end:\n\n # If start is odd: Add its value to result and move start to\n # next even value.\n if start & 1:\n result = self.operation(result, self.value[start])\n start += 1\n\n # If end is odd: Move end to previous even value, then add its\n # value to result. NOTE: This takes care of excluding `end` in any\n # situation.\n if end & 1:\n end -= 1\n result = self.operation(result, self.value[end])\n\n # Divide both start and end by 2 to make them \"jump\" into the\n # next upper level reduce-index space.\n start //= 2\n end //= 2\n\n # Then repeat till start == end.\n\n return result", "def sum(self, idx):\n idx += 1\n r = 0\n while idx > 0:\n r += self.tree[idx]\n idx -= (idx & (-idx))\n return r", "def sumRangeTree2(self, i, j, cur):\n if i > j:\n return 0\n start, end = cur.start, cur.end\n if i == start and j == end:\n return cur.val\n mid = start+(end-start)/2\n return self.sumRangeTree(i, min(j, mid), cur.left) + self.sumRangeTree(max(mid+1, i), j, cur.right)", "def recursiveSums(desiredNum, values, depth=0, max_depth=5):\n depth+=1\n if(depth>max_depth):\n return\n if(len(values)==1):\n if(values[0]==desiredNum):\n return values[0]\n else:\n arr = []\n removals = []\n for i, value in enumerate(values):\n thisDesiredNum = desiredNum-value\n if(thisDesiredNum==0):\n arr.append(value)\n elif(thisDesiredNum>0):\n #quick fix prevents double counting here\n newValues = [l for l in values if(l not in removals)]\n newValues.pop(newValues.index(value))\n arr.append([value])\n if(len(newValues)!=0 and sum(newValues)>=thisDesiredNum):\n newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)\n if(newSums):\n if(isinstance(newSums, int)):\n arr.append([newSums])\n else:\n arr[-1].extend(newSums)\n if(len(arr[-1])==0 or arr[-1]==[value]):\n arr.pop()\n removals.append(value)\n #remove unusable values\n iteratedValues = [value for value in values if(value not in removals)]\n if(iteratedValues):\n arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))\n return arr", "def calculate_sum(trees: list[Tree]) -> Tree:\n return reduce(add, trees)", "def sum_recursive(seq):\n if 
not seq: # Empty seq is false\n return 0\n return seq[0] + sum_recursive(seq[1:])", "def cumulative_sum(t):\n \"*** YOUR CODE HERE ***\"\n sum = t.label\n for b in t.branches:\n cumulative_sum(b)\n sum += b.label\n t.label = sum", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def countPathsWithSumIterative(root, targetSum):\n if not root:\n return 0\n\n stack = [[root, 0]]\n totalPaths = 0\n sumHash = {0:1} # default hash\n\n while stack: # dfs traversal\n\n node, runningSum = stack.pop()\n\n runningSum += node.data\n if runningSum in sumHash:\n sumHash[runningSum] += 1\n else:\n sumHash[runningSum] = 1\n\n if node.left:\n stack.append([node.left, runningSum])\n\n if node.right:\n stack.append([node.right, runningSum])\n\n tsum = runningSum - targetSum\n if tsum in sumHash:\n totalPaths += sumHash[tsum]\n sumHash[runningSum] -= 1 # cleanup hash that has already been counted\n\n print sumHash, totalPaths", "def sumTreePaths(r, v):\n\n if not r or not v:\n return []\n\n stack = [[r]] # stacks as nodes - convert only on comparison\n path = None\n paths = [] # return paths when sum found\n logic_count = 0\n\n while stack:\n\n path = stack.pop() # last path in stack e.g. [[1],[1,2],[1,2,3]] -> [1,2,3]\n node = path[-1] # last node in path e.g. [1,2,3] -> 3\n\n # testing out some sum checks\n # first is default for finding end(sum in this case) from root\n #if sum([x.data for x in path]) == v:\n # paths.append(path)\n\n tval = None\n tsum = 0\n tpath = []\n\n for i in range(len(path)): # growth of 1 to n ..\n logic_count += 1\n tval = path[-(i+1)].data # value of path element in reverse e.g. [1,2,3] @ -1 = 3\n tsum += tval # temp sum tracker\n tpath = [tval] + tpath # temp path tracker (new path checking reverse, appending reverse)\n if tsum == v:\n paths.append(tpath)\n break\n\n if node.left:\n lpath = list(path) # new path object so original path isn't effected\n lpath.append(node.left)\n stack.append(lpath)\n\n if node.right:\n lpath = list(path) # new path object so original path isn't effected\n lpath.append(node.right)\n stack.append(lpath)\n\n print \"logical count:\", logic_count\n return paths", "def sum(numbers):", "def get_sum(n: float, value: float) -> float:\n result = np.sum([value, - 1])\n for x in range(n):\n result = np.sum([result, np.sum([x, 1])])\n return result", "def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]", "def ll_sum(t):\n sum = 0\n for ele in t:\n for x in ele:\n sum = sum+x\n return sum", "def sum_values(values):\n return (sum(values))", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def binary_sums(start, limit):\n for n in range(start, limit):\n for i in range(1, n/2 + 1):\n yield i, n - i" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
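For reference, a minimal, self-contained usage sketch of the range-sum routine above; the TreeNode class and the example tree are illustrative assumptions, not part of the original row, and the method is repeated as a free function (self dropped) so the snippet runs on its own:

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def rangeSumBST(root: TreeNode, lo: int, hi: int) -> int:
    # Same pruning logic as the document field above: skip subtrees
    # that cannot contain values inside [lo, hi].
    def visit(node: TreeNode) -> int:
        if not node:
            return 0
        if node.val < lo:
            return visit(node.right)
        elif hi < node.val:
            return visit(node.left)
        return node.val + visit(node.left) + visit(node.right)
    return visit(root)

# BST holding 3, 5, 7, 10, 15, 18; the nodes inside [7, 15] are 7, 10, 15.
root = TreeNode(10, TreeNode(5, TreeNode(3), TreeNode(7)), TreeNode(15, None, TreeNode(18)))
print(rangeSumBST(root, 7, 15))  # prints 32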
Loops over arrays in the arrays_iterator and evaluates the cut_function at the cut_values. Returns a list of efficiencies, passed events/objects, and total events/objects. cut_function is expected to return a tuple (n_pass, n_total) with input (arrays, cut_value).
def get_eff(arrays_iterator, cut_function, cut_values):
    n_cuts = len(cut_values)
    n_total = np.zeros(n_cuts)
    n_pass = np.zeros(n_cuts)
    for arrays, dataset in arrays_iterator:
        weight = dataset.get_weight()
        for i_cut, cut in enumerate(cut_values):
            this_n_pass, this_n_total = cut_function(arrays, cut)
            n_total[i_cut] += weight * this_n_total
            n_pass[i_cut] += weight * this_n_pass
    # Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere
    eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)
    return eff, n_pass, n_total
[ "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass", "def apply_generic_reduction(self, arrays, dimensions, function, name):\n\n size = len(arrays)\n if size != 1:\n raise AssertionError(\"Input array should be 1\")\n\n if next(iter(arrays.values()))['function'] == 'get_data':\n array_value = next(iter(arrays.values()))\n variable = next(iter(array_value['array_input'][0].values()))['variable']\n orig_function = function\n function = function.replace('array1', variable)\n\n task = {}\n\n task['array_input'] = []\n task['array_input'].append(next(iter(arrays.keys())))\n task['orig_function'] = orig_function\n task['function'] = function\n task['expression'] = 'none'\n task['dimension'] = copy.deepcopy(dimensions)\n\n task['array_output'] = copy.deepcopy(array_value['array_output'])\n task['array_output']['variable'] = name\n task['array_output']['dimensions_order'] = \\\n self.diff_list(next(iter(array_value['array_input'][0].values()))['dimensions_order'], dimensions)\n\n for item in task['dimension']:\n if item in task['array_output']['dimensions']:\n del task['array_output']['dimensions'][item]\n result = ()\n for value in task['array_output']['dimensions_order']:\n input_task = self.task(task['array_input'][0])\n index = next(iter(input_task.values()))['array_output']['dimensions_order'].index(value)\n value = next(iter(input_task.values()))['array_output']['shape'][index]\n result += (value,)\n task['array_output']['shape'] = result\n task['operation_type'] = OperationType.Reduction\n\n return self.add_to_plan(name, task)\n else:\n variable = next(iter(arrays.keys()))\n orig_function = function\n function = function.replace('array1', variable)\n\n task = {}\n\n task['array_input'] = []\n task['array_input'].append(variable)\n task['orig_function'] = orig_function\n task['function'] = function\n task['expression'] = 'none'\n task['dimension'] = copy.deepcopy(dimensions)\n\n task['array_output'] = copy.deepcopy(next(iter(arrays.values()))['array_output'])\n task['array_output']['variable'] = name\n task['array_output']['dimensions_order'] = \\\n self.diff_list(next(iter(arrays.values()))['array_output']['dimensions_order'], dimensions)\n\n result = ()\n for value in task['array_output']['dimensions_order']:\n input_task = self.task(task['array_input'][0])\n index = next(iter(input_task.values()))['array_output']['dimensions_order'].index(value)\n value = next(iter(input_task.values()))['array_output']['shape'][index]\n result += (value,)\n task['array_output']['shape'] = result\n task['operation_type'] = OperationType.Reduction\n\n return self.add_to_plan(name, task)", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading 
across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def _cutPointGenerator(subsets, val_len):\n setDims = list(_.dimen for _ in subsets)\n cutIters = [None] * (len(subsets)+1)\n cutPoints = [0] * (len(subsets)+1)\n i = 1\n cutIters[i] = iter(xrange(val_len+1))\n cutPoints[-1] = val_len\n while i > 0:\n try:\n cutPoints[i] = next(cutIters[i])\n if i < len(subsets)-1:\n if setDims[i] is not None:\n cutIters[i+1] = iter((cutPoints[i]+setDims[i],))\n else:\n cutIters[i+1] = iter(xrange(cutPoints[i], val_len+1))\n i += 1\n elif cutPoints[i] > val_len:\n i -= 1\n else:\n yield cutPoints\n except StopIteration:\n i -= 1", "def N50_np(len_np, cut=50):\n cutoff=sum(len_np)*cut/100.0\n len_np.sort()\n \n count=0\n for i in len_np:\n count+=i\n if count>=cutoff:\n break\n \n print ( \"N%d is %d bp.\" % (cut, i))\n \n return i", "def weighted_sum_extraction(cutout, trace, psf, ron = 12, gain = 1.2):\n ###NEW VERSION BELOW\n # width = len(cutout[0]) #we have square cutout\n # #buffer area on either ends of the trace\n # buffer = int(round(0.85*slit_length/2)) #imported from constant\n\n #width = len(cutout[0])\n spec = []\n var = []\n for i in range(len(trace)): #loop through x\n #print(i)\n #put psf at this location\n dim = np.array(cutout.shape) + np.array(psf.shape)\n #print(dim)\n weight = np.zeros( dim) #padded, to be cropped later\n\n #case where trace i is not in the image\n if trace[i] < 0 or trace[i] > cutout.shape[0]:\n spec += [0]\n var += [0]\n else:\n x = i + int(psf.shape[1]//2)\n #print(trace[i], psf.shape[0]//2)\n y = int(trace[i] + psf.shape[0]//2)\n #print(i, x, y - int(psf.shape[0]//2), y + int(psf.shape[0]//2)+1, np.shape(weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1]))\n weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1] = psf\n weight = weight[ int(psf.shape[0]//2): int(-psf.shape[0]//2), int(psf.shape[1]//2): int(-psf.shape[1]//2)]\n #print(weight.shape, cutout.shape)\n #plt.imshow(weight*cutout,origin = 'lower')\n #plt.show()\n\n spec += [np.sum(weight * cutout)/np.sum(weight)]\n #TODO: Is the variance calculation correct? Might need another weighted term. 
\n var += [np.sum(weight * (cutout/gain + (ron/gain)**2))] #variance assuming readout noise and photon noise\n\n return np.array(spec[::-1]), np.array(var[::-1]) #flip so long wavelenght is to the right", "def conceptcover(bin_arr, limit=1, uncovered=0.1):\n arr = np.copy(bin_arr)\n arr_sum = np.sum(arr)\n result = []\n while True:\n k = kernel(arr)\n i = intent(bin_arr, k)\n e = extent(bin_arr, i)\n if len(e)*len(i) < limit or (e, i) in result: break\n result.append((e, i))\n arr = removed(arr, e, i)\n if np.sum(arr)/arr_sum < uncovered: break\n return result", "def get_discrete_split_value(arr: np.ndarray, y: np.ndarray, eval_func: Callable):\n\n # First element is the weighted average eval_func of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum(\n [\n eval_func(y[arr == value]) * np.sum(arr == value) / len(y)\n for value in set(arr)\n ]\n ),\n -1\n * sum(\n [\n pipe(\n np.sum(arr == value) / len(y),\n lambda ratio: ratio * np.log(ratio),\n )\n for value in set(arr)\n ]\n ),\n )", "def calc_shots(data):\n shot_data = []\n for year in data:\n shots = {}\n shots['2-pt 0-5 ft'] = np.zeros(2, dtype=int)\n shots['2-pt 6-10 ft'] = np.zeros(2, dtype=int)\n shots['2-pt 11-15 ft'] = np.zeros(2, dtype=int)\n shots['2-pt >15 ft'] = np.zeros(2, dtype=int)\n shots['3-pt <26 ft'] = np.zeros(2, dtype=int)\n shots['3-pt 26-30 ft'] = np.zeros(2, dtype=int)\n shots['3-pt >30 ft'] = np.zeros(2, dtype=int)\n two_pt_zero_to_five_ft = year[year['ShotDist'] <= 5]\n shots['2-pt 0-5 ft'][0] = len(two_pt_zero_to_five_ft)\n shots['2-pt 0-5 ft'][1] = len(two_pt_zero_to_five_ft[two_pt_zero_to_five_ft['ShotOutcome'] == 'make'])\n two_pt_six_to_ten_ft = year[(year['ShotDist'] > 5) & (year['ShotDist'] <= 10)]\n shots['2-pt 6-10 ft'][0] = len(two_pt_six_to_ten_ft)\n shots['2-pt 6-10 ft'][1] = len(two_pt_six_to_ten_ft[two_pt_six_to_ten_ft['ShotOutcome'] == 'make'])\n two_pt_eleven_to_fifteen_ft = year[(year['ShotDist'] > 10) & (year['ShotDist'] <= 15)]\n shots['2-pt 11-15 ft'][0] = len(two_pt_eleven_to_fifteen_ft)\n shots['2-pt 11-15 ft'][1] = len(two_pt_eleven_to_fifteen_ft[two_pt_eleven_to_fifteen_ft['ShotOutcome'] == 'make'])\n two_pt_fifteen_plus_ft = year[(year['ShotType'].str.contains('2-pt')) & (year['ShotDist'] > 15)]\n shots['2-pt >15 ft'][0] = len(two_pt_fifteen_plus_ft)\n shots['2-pt >15 ft'][1] = len(two_pt_fifteen_plus_ft[two_pt_fifteen_plus_ft['ShotOutcome'] == 'make'])\n three_pt_twenty_five_or_less_ft = year[(year['ShotType'].str.contains('3-pt')) & (year['ShotDist'] <= 25)]\n shots['3-pt <26 ft'][0] = len(three_pt_twenty_five_or_less_ft)\n shots['3-pt <26 ft'][1] = len(three_pt_twenty_five_or_less_ft[three_pt_twenty_five_or_less_ft['ShotOutcome'] == 'make'])\n three_pt_twenty_six_to_thirty_ft = year[(year['ShotDist'] > 25) & (year['ShotDist'] <= 30)]\n shots['3-pt 26-30 ft'][0] = len(three_pt_twenty_six_to_thirty_ft)\n shots['3-pt 26-30 ft'][1] = len(three_pt_twenty_six_to_thirty_ft[three_pt_twenty_six_to_thirty_ft['ShotOutcome'] == 'make'])\n three_pt_thirty_plus_ft = year[year['ShotDist'] > 30]\n shots['3-pt >30 ft'][0] = len(three_pt_thirty_plus_ft)\n shots['3-pt >30 ft'][1] = len(three_pt_thirty_plus_ft[three_pt_thirty_plus_ft['ShotOutcome'] == 'make'])\n shot_data.append(shots)\n return shot_data", "def apply_crosstalk(self, amp_arrays):\n if self.ccd.xtalk is None:\n return amp_arrays\n output = []\n for amp_index, xtalk_row in enumerate(self.ccd.xtalk):\n output.append(amp_arrays[amp_index] +\n sum([x*y for x, y in zip(amp_arrays, xtalk_row)]))\n return 
output", "def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data", "def get_split_goodness_fit_continuous (\n feature_array: np.ndarray, target_array: np.ndarray, split: float, evaluate_function: Callable\n ):\n # Get above and below the split value\n above = feature_array >= split\n below = feature_array < split\n\n # Get weighted average evaluate_function on the splits\n n_above = np.sum ( above )\n above_eval = (\n evaluate_function ( target_array [ above ] ) * n_above / len ( target_array )\n ) # Weight = frac points in above\n below_eval = (\n evaluate_function ( target_array [ below ] ) * ( len ( target_array ) - n_above ) / len ( target_array )\n ) # Weight = frac points not in above\n\n # returns weighted sum of evaluate_function across splits & the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum (\n map (\n lambda x: x * np.log ( x ),\n [ n_above / len ( target_array ), ( len ( target_array ) - n_above ) / len ( target_array ) ],\n )\n ),\n ) # End get_split_goodness_fit_continuous", "def test_algorithm_2():\n k = 5\n num_of_cov_sizes = 40\n n_sizes = np.linspace(10, num_of_cov_sizes, num_of_cov_sizes - 10, dtype=int)\n\n # n_array = np.array([10,20,30,40,50])\n n_array = np.array([])\n for i in n_sizes:\n n_array = np.append(n_array, i)\n\n times_array = np.array([])\n for n in n_array:\n n = int(n)\n cov = dg_create_random_cov(n) # ndarray\n r_time = calculate_running_time_algorithm_2(cov, k) # Algorithm 2\n times_array = np.append(times_array, r_time)\n\n plts_plot_algorithm_scale(12, 4, n_array, times_array)\n pass", "def get_split_goodness_fit_continuous(\n arr: np.ndarray, y: np.ndarray, split: float, eval_func: Callable\n ):\n # Get above and below the split value\n above = arr >= split\n below = arr < split\n\n # get weighted average eval_func on the splits\n n_above = np.sum(above)\n above_eval = (\n eval_func(y[above]) * n_above / len(y)\n ) # weight = frac points in above\n below_eval = (\n eval_func(y[below]) * (len(y) - n_above) / len(y)\n ) # weight = frac points not in above\n\n # returns weighted sum of eval_func across splits, and the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum(\n map(\n lambda x: x * np.log(x),\n [n_above / len(y), (len(y) - n_above) / len(y)],\n )\n ),\n )", "def calc_arrayed_fitnesses(self):\n for i, p in enumerate(self.populations):\n for c in p.chromosomes:\n v = self.array_values_replace(i, c.values() )\n c.fitness = self.fitness_functions(i, v)\n p.sort_population()", "def iterations(self, n, fitness_function):", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), 
dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def cut_values(self):\n #read file containg the cut values for all files\n input_file = open(\"/users/langrock/plotting_macros/SMELLIE/SMELLIE_analysis_framework/datatables/cut_values.txt\",'r')\n for line in input_file:\n #find the fibre\n if self.f in line:\n words = line.split()\n for i in range(1,9):\n #fill the list with the spatial cut values\n self.spatialcuts.append(float(words[i]))\n for i in range(9,16):\n #fill the list with the timing cut values\n self.timecuts.append(float(words[i]))\n\n else:\n continue\n\n else:\n print \"Reached end of file!\"", "def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
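As a quick illustration of the contract described in the query above (cut_function returning an (n_pass, n_total) tuple, datasets exposing a get_weight() method), here is a hedged, self-contained sketch with toy stand-ins; FakeDataset and pt_cut are assumptions invented for this example only, and get_eff is repeated from the document field so the snippet runs on its own:

import numpy as np

# get_eff as in the document field above
def get_eff(arrays_iterator, cut_function, cut_values):
    n_cuts = len(cut_values)
    n_total = np.zeros(n_cuts)
    n_pass = np.zeros(n_cuts)
    for arrays, dataset in arrays_iterator:
        weight = dataset.get_weight()
        for i_cut, cut in enumerate(cut_values):
            this_n_pass, this_n_total = cut_function(arrays, cut)
            n_total[i_cut] += weight * this_n_total
            n_pass[i_cut] += weight * this_n_pass
    eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)
    return eff, n_pass, n_total

# Hypothetical stand-in for a Dataset object
class FakeDataset:
    def get_weight(self):
        return 1.0

# Hypothetical cut function: counts entries above a pT threshold
def pt_cut(arrays, cut_value):
    return int(np.sum(arrays > cut_value)), arrays.size

arrays_iterator = [(np.array([120., 250., 400., 80.]), FakeDataset())]
cut_values = [100., 200., 300.]
eff, n_pass, n_total = get_eff(arrays_iterator, pt_cut, cut_values)
print(eff)  # efficiencies: [0.75, 0.5, 0.25]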
Expects a list of signals and a list of bkgs (both Dataset objects), plus a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values):
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)
    return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
[ "def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):\n# if percent_sig_to_keep < 100:\n# raise NotImplementedError(\"percentage of < 100 not yet imlemented\")\n percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep\n bkg_length_before = len(bkg_data)\n bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length\n\n lower_cut, upper_cut = np.percentile(signal_data, percentile)\n cut_bkg = np.count_nonzero(np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut))\n rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length\n\n return [lower_cut, upper_cut], rejected_bkg", "def cut_feas(data, feas, target, cut_params=20):\n ## get df ##\n if target is None:\n df = data[[feas]].sort_values(feas).reset_index(drop=True)\n else:\n df = data[[feas] + [target]].sort_values(feas).reset_index(drop=True)\n\n ## start cut ##\n if cut_params is None:\n # not cut\n bins = df[feas].unique().tolist()\n\n else:\n if isinstance(cut_params, list):\n bins = cut_params # cut\n\n elif isinstance(cut_params, int):\n bins = pd.qcut(df[feas], cut_params, duplicates='drop', retbins=True)[1].tolist() # qcut\n\n else:\n try:\n cut_params['KMeans']\n bins = cut_feas_bykmean(df, feas, cut_params) # cut by kmean\n except:\n bins = cut_feas_bytree(df, feas, target, cut_params) # cut by tree\n\n if len(bins) == 1:\n bins = [-np.inf, np.inf]\n else:\n bins[0], bins[len(bins) - 1] = -np.inf, np.inf\n df[feas] = pd.cut(df[feas], bins)\n\n # get st\n st1 = pd.DataFrame(df[feas].value_counts(dropna=False).sort_index())\n st1.index = st1.index.astype(str)\n df[feas] = df[feas].astype(str)\n\n if target is None:\n return df, st1, bins\n else:\n st2 = df.groupby([feas], dropna=False).agg({target: 'mean'})\n st = st1.merge(st2, left_index=True, right_index=True)\n return df, st, bins", "def __init__(self, dataset_list, cut_list, caption = None):\n self.dataset_list = dataset_list\n self.cut_list = cut_list\n self.caption = caption\n\n self.cumulative_cuts = []\n for i, cut in enumerate(self.cut_list):\n aux_cumulative_cut = \" && \".join(self.cut_list[:i+1])\n self.cumulative_cuts.append( aux_cumulative_cut )\n\n self.table_list = []\n self.FillTable()", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax", 
"def get_fidcuts():\n return combine_cuts([fid_cuts('muN_pt', 'muN_eta'),\n fid_cuts('muP_pt', 'muP_eta')])", "def cut_values(self):\n #read file containg the cut values for all files\n input_file = open(\"/users/langrock/plotting_macros/SMELLIE/SMELLIE_analysis_framework/datatables/cut_values.txt\",'r')\n for line in input_file:\n #find the fibre\n if self.f in line:\n words = line.split()\n for i in range(1,9):\n #fill the list with the spatial cut values\n self.spatialcuts.append(float(words[i]))\n for i in range(9,16):\n #fill the list with the timing cut values\n self.timecuts.append(float(words[i]))\n\n else:\n continue\n\n else:\n print \"Reached end of file!\"", "def bessel_bandpass_filter(data, lowcut, highcut, fs, order=2):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n # bessel() and lfilter() are from scipy.signal\n\n b, a = bessel(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def discretize(data, bins, list_to_discrete):\n for col_name in list_to_discrete:\n data[col_name] = pd.cut(data[col_name], bins, right=True,\n precision=4).cat.codes", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def bin_continuous_features(self, data_df, features_to_bin=[]):\n for feature_to_bin in features_to_bin:\n if feature_to_bin['name'] in data_df.columns:\n if len(feature_to_bin['bins']) == 1:\n if feature_to_bin['bins'][0] == 'quantile':\n # bin to quantiles.\n quantiles = [0.2, 0.4, 0.6, 0.8]\n min_val = data_df.agg({feature_to_bin['name']: 'min'}).collect()[0][0] -1\n max_val = data_df.agg({feature_to_bin['name']: 'max'}).collect()[0][0] +1\n q = data_df.approxQuantile(feature_to_bin['name'], quantiles, relativeError=0.05) #relativeError of 0 is more accurate but more computationaly expensive.\n bins = [min_val] + q + [max_val]\n data_df = spark_cut(data_df, feature_to_bin['name'], bins=bins,\n labels=[f'Q_{str(q)[2]}0%' for q in quantiles]+['Q100%'])\n\n if isinstance(feature_to_bin['bins'][0], int):\n # an integer (n) was provided. 
bin to n identical sections.\n if len(data_df.select(feature_to_bin['name']).distinct().collect()) > feature_to_bin['bins'][0]:\n min_val = data_df.agg({feature_to_bin['name']: 'min'}).collect()[0][0] -1\n max_val = data_df.agg({feature_to_bin['name']: 'max'}).collect()[0][0] +1\n bins = np.linspace(min_val, max_val, feature_to_bin['bins'][0])\n data_df = spark_cut(data_df, feature_to_bin['name'], bins=bins,\n labels=range(feature_to_bin['bins'][0]-1))\n else:\n # bin according to the provided bins.\n min_val = data_df.agg({feature_to_bin['name']: \"min\"}).collect()[0][0]\n max_val = data_df.agg({feature_to_bin['name']: \"max\"}).collect()[0][0]\n full_bins = sorted([(min_val - 1)] + feature_to_bin['bins'] + [(max_val + 1)])\n data_df = spark_cut(\n data_df, feature_to_bin['name'],\n bins=full_bins,\n labels=range(len(full_bins) - 1))\n return data_df", "def cut_graph(self):\n graph = self._graph.copy()\n values = self.get_values()\n cuts = []\n\n for name, value in zip(self._names, values):\n # get cuts\n if name[0] == \"x\" and value > 0.5:\n drc, i, j = name.split(\"_\")\n i, j = int(i), int(j)\n # apply cut\n if drc == \"xr\":\n cut = ((i, j), (i, j + 1))\n elif drc == \"xc\":\n cut = ((i, j), (i + 1, j))\n elif drc == \"xf\":\n cut = ((i, j), (i + 1, j + 1))\n elif drc == \"xb\":\n cut = ((i, j), (i - 1, j + 1))\n else:\n raise SystemExit(\"Edge variable is wrong\")\n graph.remove_edge(*cut)\n cuts.append(cut)\n\n return graph, cuts", "def cut_feas_bykmean(data, feas, cut_params={'KMeans': 4}):\n df = data[[feas]].sort_values(feas).reset_index(drop=True)\n tmp = df.dropna(subset=[feas]).reset_index(drop=True)\n\n n_clusters = cut_params.get('KMeans', 4)\n kmeans = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300).fit(tmp)\n tmp['feassaxsxax'] = kmeans.fit_predict(tmp)\n st = tmp.groupby(['feassaxsxax']).agg({feas: [min, max]})\n bins = sorted(list(st[feas]['max']))\n bins[len(bins) - 1] = np.inf\n bins = [-np.inf] + bins\n\n return bins", "def butterBand(data, lowCut, highCut, sampRat, order=4):\n f0 = 0.5 * sampRat\n low = lowCut / f0\n high = highCut / f0\n b, a = scipySingal.butter(order, [low, high], btype='bandpass')\n butterData = scipySingal.lfilter(b, a, data)\n return butterData", "def append_cuts(cuts: Iterable[Cut]) -> Cut:\n # The following is a fold (accumulate/aggregate) operation; it starts with cuts[0], and appends cuts[1] to it;\n # then takes their it concatenation and appends cuts[2] to it; and so on.\n return reduce(append, cuts)", "def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == 
False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def targetFromSignals(obars, nbands=3, amount=1, targetprofit=15., stoploss=45.):\n # bandsg, yband, ask, bid, day, amount, targetprofit, stoploss\n bars = obars.copy()\n for j in range(nbands): # for each band traverse it\n ibandsg = bars.columns.get_loc('bandsg'+str(j))\n # being pessimistic ... 
right\n ybandsell = traverseSellBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n ybandbuy = traverseBuyBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n bars['y'+str(j)] = mergebandsignals(ybandsell, ybandbuy)\n\n return bars", "def efficient_binned_tagged_jets_hist(datalist,discriminant, discriminant_cuts, CSV_cuts, bins, nbins, Difference=False, mode=\"pT_hadron\",Save=False):\n\ttitle = \"binned_tagged_jets_vs_\"+mode\n\tAllJetsHistlist = []\n\tCSVHistlist = []\n\tDiscriminantHistlist = []\n\tif mode == \"pT_hadron\":\n\t\tfeature = 2\n\telif mode == \"pT_jet\":\n\t\tfeature = 3\n\telif mode == \"decay_vx\":\n\t\tfeature = 4\n\tfor n,data in enumerate(datalist): \n print \"working on\",data[1]\n\t\tran = data[2]\n\t\tAllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n\t\tAllJetsHistlist[n].SetLineColor(4)\n\t\tCSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n\t\tCSVHistlist[n].SetLineColor(3)\n\t\tDiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n\t\t\tbin_number = bin_selection(particle,bins)\n\t\t\tif bin_number == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(particle[feature])\n\t\t\tif particle[1] >= CSV_cuts[bin_number]: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n try:\n\t\t\t\t\tL = particle[16]/float(particle[13])\n\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\tcontinue\n\t\t\tif L >= discriminant_cuts[bin_number]: DiscriminantHistlist[n].Fill(particle[feature])\n\tcanvaslist = []\n\tlegendlist = []\n\tTfilelist = []\n\tfor n,data in enumerate(datalist):\n\t\tcanvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n\t\tcanvaslist[n].SetTitle(data[1]+\"_\"+title)\n \trt.gStyle.SetOptStat(0)\n\t\tlegendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n\t\tlegendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n\t\tlegendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n\t\tlegendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n \tAllJetsHistlist[n].GetXaxis().SetTitle(mode)\n \tAllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n \tAllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n\t\tAllJetsHistlist[n].Draw()\n\t\tCSVHistlist[n].Draw(\"SAME\")\n\t\tDiscriminantHistlist[n].Draw(\"SAME\")\n\t\tlegendlist[n].Draw()\n\t\tif Save:\n\t\t\tcanvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n\t\t\tTfilelist.append(rt.TFile(\"histogram_files/pT_hists/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n \tprint \"saved histogram as histogram_files/pT_hists/\"+title+\"_\"+data[1]+discriminant+\".root\"\n\t\t\tAllJetsHistlist[n].Write()\n\t\t\tCSVHistlist[n].Write()\n\t\t\tDiscriminantHistlist[n].Write()", "def compute_pseudovals_jackknife(treatment_cookie_buckets, control_cookie_buckets,\n bins_boundaries, quantile_to_test):\n\n # convert to arrays for treatment and control groups\n\n # !!! 
some notebooks have to be updates to pass arrays\n\n # data_treat = treatment_cookie_buckets.values\n # data_control = control_cookie_buckets.values\n\n # compute number of cookie buckets\n num_of_cookie_buckets = treatment_cookie_buckets.shape[0]\n assert num_of_cookie_buckets == control_cookie_buckets.shape[0]\n\n # list of all bucket ids\n indices = np.arange(num_of_cookie_buckets)\n\n # for stacking the results for both treatment and control groups\n treatment_group_mv = list()\n control_group_mv = list()\n\n # number of histogram bins\n num_of_bins = len(bins_boundaries)-1\n\n # obtain bins given boundaries\n bins_tuples = [(bins_boundaries[i-1],\n bins_boundaries[i])\n for i in range(1, num_of_bins+1)]\n\n for cur_bucket in indices:\n # drop current bucket\n ind_treat = np.delete(indices, cur_bucket)\n ind_control = np.delete(indices, cur_bucket)\n # get corresponding histograms\n cur_hist_treatment = treatment_cookie_buckets[ind_treat, :].sum(axis=0)\n cur_hist_control = control_cookie_buckets[ind_control, :].sum(axis=0)\n # compute approximate quantiles based on binned data\n treatment_group_mv += [compute_quantile_hist_data(\n cur_hist_treatment, bins_tuples, quantile=quantile_to_test)]\n control_group_mv += [compute_quantile_hist_data(\n cur_hist_control, bins_tuples, quantile=quantile_to_test)]\n\n # convert to array for further purposes\n treatment_group_mv = np.stack(treatment_group_mv)\n control_group_mv = np.stack(control_group_mv)\n\n # compute approximate quantiles for all buckets and both groups\n treatment_all = compute_quantile_hist_data(\n treatment_cookie_buckets.sum(axis=0), bins_tuples, quantile=quantile_to_test)\n\n control_all = compute_quantile_hist_data(\n control_cookie_buckets.sum(axis=0), bins_tuples, quantile=quantile_to_test)\n\n # compute percent change for total change of metric value (mv)\n overall_percent_change = 100 * \\\n (treatment_all / control_all - 1)\n\n # compute percent change for all buckets except given\n percent_change_for_all_but_given = 100 * \\\n (treatment_group_mv / control_group_mv - 1)\n\n # compute ps_{-j} according to the definition\n ps_j = num_of_cookie_buckets * overall_percent_change - \\\n (num_of_cookie_buckets - 1) * percent_change_for_all_but_given\n\n return ps_j", "def insert_random_cut(self, cuts):\n possible_cuts = [(i, c) for i, s in enumerate(self.segments) for c in cuts if s.start < c.start and c.end < s.end]\n if possible_cuts:\n self.insert_cut(*choice(possible_cuts))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
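The ROC helper above needs the svjflatanalysis package and real Dataset objects, so a directly runnable call cannot be shown here; instead, a hedged matplotlib sketch of how its (eff_sig, eff_bkg) output is typically drawn, with dummy efficiency arrays standing in for a real roccurve call:

import numpy as np
import matplotlib.pyplot as plt

# Dummy efficiencies standing in for:
# eff_sig, eff_bkg, *_ = roccurve(signals, bkgs, cut_function, cut_values)
cut_values = np.linspace(100., 500., 9)
eff_sig = np.linspace(1.0, 0.2, 9)   # signal efficiency per cut value (made up)
eff_bkg = np.linspace(1.0, 0.02, 9)  # background efficiency per cut value (made up)

fig, ax = plt.subplots(figsize=(8, 8))
ax.plot([0., 1.], [0., 1.], linestyle='--', color='gray')  # random-guess baseline
ax.plot(eff_bkg, eff_sig, marker='o')  # bkg eff. on x, signal eff. on y
ax.set_xlabel('Bkg eff.')
ax.set_ylabel('Signal eff.')
fig.savefig('roccurve_sketch.png')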
Basic plotting style for a single roccurve, based on multiple signal and bkg samples. Expects an ax object to be given; this function is not standalone.
def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):
    eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)
    return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
[ "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, 
color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def mkRatePointsFig( rgb=True, outfile=None, ageaxis=True, logscale=False, presfig=False, **kwarg):\n from cosmo import agez, zfromt\n from hstsnpipe.tools.rates.ratetable import RATELISTALL, RATELISTLOWZ, RATELISTHIGHZ, R13,R13h\n from hstsnpipe.tools.figs import plotsetup\n from matplotlib import pyplot as pl\n\n if presfig : \n plotsetup.presfig()\n else :\n plotsetup.fullpaperfig(figsize=[8,5])\n\n if logscale : scale = 1e-4\n else : scale = 1\n\n ax1 = pl.axes( [0.1,0.12,0.87,0.76] )\n for i in range(len(RATELISTLOWZ)):\n R = RATELISTLOWZ[i]\n R.mfc='0.8'\n R.mec='0.8'\n R.color='0.8'\n R.mew=0\n R.marker='o'\n R.zerrplus = np.zeros( len( R.zerrplus ) )\n R.zerrminus = np.zeros( len( R.zerrplus ) )\n R.plot( thicksys=False, zorder=-10*i, scalerates=scale, **kwarg )\n for i in range(len(RATELISTHIGHZ)-1):\n R = RATELISTHIGHZ[i]\n R.ms=12\n R.zerrplus = np.zeros( len( R.zerrplus ) )\n R.zerrminus = np.zeros( len( R.zerrplus ) )\n pl.plot( R.z, R.rate, marker=R.marker, ms=12, mew=0, ls=' ',\n color='w', alpha=1, zorder=10*i )\n R.plot( thicksys=False, zorder=10*(i+1), scalerates=scale, alpha=0.3, **kwarg )\n\n R13.mfc='darkorange'\n R13.mec='darkred'\n R13.color='darkred'\n R13.ms=15\n R13.plot( thicksys=True, zorder=1000, scalerates=scale )\n\n #from matplotlib.font_manager import FontProperties\n #fs=FontProperties(size='9')\n\n ax = pl.gca()\n ax.set_xlim([-0.05, 2.81])\n pl.xlabel(\"Redshift\")\n\n if logscale : \n ax.set_yscale(\"log\",nonposy=\"clip\")\n ax.set_ylim([0.011*scale, 5.55*scale])\n\n pl.ylabel(r\"SNR ~$\\left[{\\rm yr}^{-1}~ {\\rm Mpc}^{{-3}}~ {{\\rm h}_{70}}^{3}\\right]$\")\n ax.yaxis.set_ticklabels( ['','$10^{-5}$', '$10^{-4}$', ''] )\n\n #ax1.text( 2.29, 0.32*scale, 'CANDELS', ha='left', va='top', color='darkred', rotation=-90 )\n ax1.text( 2.16, 0.32*scale, 'CANDELS', ha='left', va='top', color='darkred', rotation=-90 )\n ax1.text( 1.64, 0.28*scale, 'CLASH', ha='right', va='top', color='darkmagenta', rotation=-90 )\n ax1.text( 1.61, 1.02*scale, 'GOODS', ha='center', va='bottom', color='darkgreen', rotation=-90 )\n ax1.text( 1.54, 2.53*scale, 'CSS', ha='center', va='bottom', color='darkcyan', rotation=-90)\n ax1.text( 1.70, 1.75*scale, 'SDF', ha='center', va='bottom', color='0.2', rotation=-90 )\n\n ax1.text( 2.45, 3.0*scale, r'\\noindent Stat.+Syst.\\\\ Error',ha='left',va='center', color='darkred')\n ax1.text( 2.45, 1.7*scale, 'Syst. 
Only',ha='left',va='center', color='darkorange')\n ax1.plot( [2.27, 2.42], scale*np.array([ 1.37, 2.80]), marker=' ', ls='-', color='darkred', lw=0.8 )\n ax1.plot( [2.28, 2.42], scale*np.array([ 0.91, 1.71]), marker=' ', ls='-', color='darkorange', lw=0.8 )\n\n else : \n ax.set_ylim([0.0, 2.55])\n pl.ylabel(r\"SNR ~$\\left[ 10^{{-4}}~ {\\rm yr}^{-1}~ {\\rm Mpc}^{{-3}}~ {{\\rm h}_{70}}^{3}\\right]$\")\n\n if presfig: txtrot,txtshift=90,0.3\n else : txtrot,txtshift=0,0\n ax1.text( 2.33, 0.72, 'CANDELS', ha='left', va='top', color='darkred' )\n ax1.text( 1.64, 0.21, 'CLASH', ha='left', va='top', color='darkmagenta' )\n ax1.text( 1.27, 1.31+2*txtshift, 'GOODS', ha='left', va='top', color='darkgreen',rotation=txtrot )\n ax1.text( 1.13, 1.44+txtshift, 'CSS', ha='right', va='top', color='darkcyan',rotation=txtrot )\n ax1.text( 1.67, 1.15+txtshift, 'SDF', ha='right', va='top', color='0.2',rotation=txtrot )\n \n ax1.text( 1.9, 2.22, 'Stat. + Syst. Error',ha='left',va='center', color='darkred')\n ax1.text( 1.95, 2.0, 'Systematic Only',ha='left',va='center', color='darkorange')\n ax1.plot( [1.77, 1.92], [ 1.45, 2.15], marker=' ', ls='-', color='darkred', lw=0.8 )\n ax1.plot( [1.8, 2.02], [ 1.02, 1.93], marker=' ', ls='-', color='darkorange', lw=0.8 )\n\n axtop = ax1.twiny()\n axtop.set_xlim( ax1.get_xlim() )\n if ageaxis: \n ageticks = np.array( [13,8,5,3] )\n zageticks = zfromt( ageticks )\n axtop.set_xticks( zageticks )\n axtop.set_xticklabels( ageticks )\n axtop.set_xlabel('Age of Universe [Gyr]')\n\n\n pl.draw()\n if outfile: \n pl.savefig(outfile)\n return(outfile)\n else:\n return( ax )", "def plotting_gaussian_curves():\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n x = np.arange(1, 100)\n xbar = 50.0\n s = 15.0\n a = 20.0\n c = 0.0\n singlecurve = zoom_and_gauss_general.gauss(x, xbar, s, a, c)\n\n # Balmer Emission Lines\n x = np.arange(1, 100)\n xbar = 50.0\n s1 = 15.0\n a1 = 20.0\n s2 = 25.0\n a2 = -2.0\n c = 0.0\n\n doublecurve = zoom_and_gauss_general.double_gauss(x, xbar,\n s1, a1, c, s2, a2)\n\n positive = zoom_and_gauss_general.gauss(x, xbar, s1, a1, doublecurve[0])\n negative = zoom_and_gauss_general.gauss(x, xbar, s2, a2, c)\n\n # Oxygen Two Line\n x = np.arange(1, 100)\n xbar = 40.0\n s1 = 8.0\n a1 = 20.0\n s2 = 8.0\n a2 = 30.0\n c = 0.0\n\n oxycurve = oxy2_gauss(x, xbar, s1, a1, c, s2, a2)\n\n xbar3 = 40.0\n xbar4 = 63.5\n s3 = 8.0\n a3 = 20.0\n\n s4 = 8.0\n a4 = 30.0\n\n positive1 = zoom_and_gauss_general.gauss(x, xbar3, s3, a3, oxycurve[0])\n positive2 = zoom_and_gauss_general.gauss(x, xbar4, s4, a4, oxycurve[0])\n\n ax1.plot(x, singlecurve)\n ax2.plot(x, doublecurve)\n ax2.plot(x, positive, 'r', linestyle='--')\n ax2.plot(x, negative, 'g', linestyle='--')\n ax3.plot(x, oxycurve)\n ax3.plot(x, positive1, 'r', linestyle='--')\n ax3.plot(x, positive2, 'r', linestyle='--', )\n ax1.set_yticklabels([])\n ax2.set_yticklabels([])\n ax1.set_ylim(-3, 25.5)\n ax2.set_ylim(-3, 20.5)\n ax3.set_ylim(-3, 30.5)\n ax3.set_yticklabels([])\n ax1.set_title('Single Gaussian Curve')\n ax2.set_title('Balmer Fitting with Gaussian Curves')\n ax3.set_title('[OII] Fitting with Gaussian Curves')\n txt1 = '(A)'\n txt2 = '(B)'\n txt3 = '(C)'\n ax1.annotate(txt1, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n ax2.annotate(txt2, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n ax3.annotate(txt3, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n\n plt.show()", "def plot_rg(self, **kwargs):\n # if self.obs_data is not 
None:\n # rg = self.obs_data\n # else:\n rg = kwargs.get('data', self.rg())\n err_bars = self.block_error(observable=rg)\n self.axis.set_ylabel(r\"Rg ($\\AA$)\", fontsize=self.label_fsize)\n self.axis.set_xlabel(\"T (K)\", fontsize=self.label_fsize)\n p = self.axis.plot(self.temperatures, rg.mean(axis=1), self.style, label=self.label, color=self.color)\n if self.protein in self.florys:\n flory = self.florys[self.protein][0, :]\n if min(flory) < 0.5 and max(flory) > 0.5:\n Tc = self.find_Tc(florys=flory)\n idx_sup = np.where(self.temperatures == np.min(self.temperatures[self.temperatures > Tc]))[0][0]\n idx_inf = np.where(self.temperatures == np.max(self.temperatures[self.temperatures < Tc]))[0][0]\n slope = (rg[idx_sup, :].mean() - rg[idx_inf, :].mean()) / (\n self.temperatures[idx_sup] - self.temperatures[idx_inf])\n intersect = rg[idx_sup, :].mean() - slope * self.temperatures[idx_sup]\n rgC = slope * Tc + intersect\n # TODO : HARDCODE...\n self.axis.set_ylim(25, 95)\n ylim = self.axis.get_ylim()\n self.axis.axhspan(rgC, ylim[1], color=\"green\", alpha=0.08)\n self.axis.axhspan(ylim[0], rgC, color=\"red\", alpha=0.08)\n pline, capline, barline = self.axis.errorbar(self.temperatures, rg.mean(axis=1), yerr=err_bars, uplims=True,\n lolims=True, fmt='', color=p[0].get_color())\n capline[0].set_marker('_')\n capline[0].set_markersize(10)\n capline[1].set_marker('_')\n capline[1].set_markersize(10)\n return rg", "def plot(self, idx, codastyle='-'):\n\n # Get the trace and its metadata\n trace_attrs, trace = self.__get_trace(idx)\n\n # Arrival times vertical lines\n p_time = trace_attrs['p_arrival_sample']\n s_time = trace_attrs['s_arrival_sample']\n e_time = trace_attrs['coda_end_sample']\n\n # Set up the plot\n fig, (ax_e, ax_n, ax_z) = plt.subplots(nrows=3, sharex=True)\n fig.set_size_inches(8, 6)\n\n # Plot the e signal\n ax_e.plot(trace[:, 0], color='k')\n ax_e.axvline(p_time,\n color='dodgerblue',\n label='P-Arrival',\n linestyle=codastyle)\n ax_e.axvline(s_time,\n color='orangered',\n label='S-Arrival',\n linestyle=codastyle)\n ax_e.axvline(e_time,\n color='green',\n label='Coda End',\n linestyle=codastyle)\n\n # Plot the n signal\n ax_n.plot(trace[:, 1], color='k')\n ax_n.axvline(p_time,\n color='dodgerblue',\n label='P-Arrival',\n linestyle=codastyle)\n ax_n.axvline(s_time,\n color='orangered',\n label='S-Arrival',\n linestyle=codastyle)\n ax_n.axvline(e_time,\n color='green',\n label='Coda End',\n linestyle=codastyle)\n\n # Plot the z signal\n ax_z.plot(trace[:, 1], color='k')\n ax_z.axvline(p_time,\n color='dodgerblue',\n label='P-Arrival',\n linestyle=codastyle)\n ax_z.axvline(s_time,\n color='orangered',\n label='S-Arrival',\n linestyle=codastyle)\n ax_z.axvline(e_time,\n color='green',\n label='Coda End',\n linestyle=codastyle)\n\n # Set up the rest of the plot\n handles, labels = ax_e.get_legend_handles_labels()\n fig.legend(handles, labels, loc='center right')\n fig.suptitle(f'Trace {trace_attrs[\"trace_name\"]}')\n\n return fig", "def plot_ROC_zoom():\r\n \r\n fpr = dict()\r\n tpr = dict()\r\n threshold = dict()\r\n roc_auc = dict()\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n\r\n colors = ['aqua', 'red', 'royalblue']\r\n for i, color in zip(range(len(y_prob_list)), colors):\r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test_list[i], y_prob_list[i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n # plot ROC curve\r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n label='AUC %0.3f'%roc_auc[i]\r\n )\r\n\r\n 
plt.legend(loc='lower right', prop={'size': 13, 'weight': 'bold'})\r\n # plt.plot([0, 1], [0, 1], 'r--', linewidth=2)\r\n plt.xlim([-0.01, 0.20])\r\n plt.ylim([0.8, 1.01])\r\n plt.xticks([0, 0.05, 0.10, 0.15, 0.20], fontsize=14, fontweight='bold')\r\n plt.yticks([0.80, 0.85, 0.90, 0.95, 1.00], fontsize=14, fontweight='bold')\r\n ax.axhline(y=0.8, color='k', linewidth=3)\r\n ax.axhline(y=1.01, color='k', linewidth=3)\r\n ax.axvline(x=-0.01, color='k', linewidth=3)\r\n ax.axvline(x=0.20, color='k', linewidth=3)\r\n\r\n plt.ylabel('True Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xlabel('False Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xticks(fontsize=14, fontweight='bold')\r\n plt.yticks(fontsize=14, fontweight='bold')\r\n ax.tick_params(direction='out', length=6, width=2, colors='k',\r\n grid_color='k', grid_alpha=0.5)\r\n\r\n plt.grid(True)\r\n plt.show()\r\n plt.savefig(os.path.join(result_dir, 'ROC_exvivo_2.png'), format='png', dpi=600)\r\n plt.close()", "def plotRocCurve(fpr, tpr, roc_auc):\n \n fig, axs = plt.subplots(11, 2, sharex=True, sharey=True, figsize=(20, 150))\n\n row_idx = 0\n col_idx = 0\n\n for annotator in fpr:\n lw = 2\n\n axs[row_idx, col_idx].plot(fpr[annotator], tpr[annotator], color='darkorange',\n lw=lw, label='ROC curve (area = %0.4f)' % roc_auc[annotator])\n axs[row_idx, col_idx].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n axs[row_idx, col_idx].legend(loc=\"lower right\")\n axs[row_idx, col_idx].set_title(annotator)\n\n if col_idx == 0:\n col_idx += 1\n else:\n col_idx = 0 \n row_idx += 1\n \n plt.show()", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n 
plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def plot(r,g,b,saturation,value,date):\n f, (ax1, ax2, ax3) = plt.subplots(3,1, sharex=True)\n # visible RGB\n ax1.plot(date,r,'-r',label='red')\n ax1.plot(date,g,'-g',label='green')\n ax1.plot(date,b,'-b',label='blue')\n ax1.set_ylabel('rho')\n # saturation compare\n S = [sat1 for (sat1,sat2) in saturation]\n s = [sat2 for (sat1,sat2) in saturation] \n ax2.plot(date,S,'.r',label='HSV_sat')\n ax2.plot(date,s,'.g',label='SV_sat')\n ax2.set_ylabel('sat.')\n # value compare\n V = [v1 for (v1,v2) in value]\n v = [v2 for (v1,v2) in value] \n ax3.plot(date,V,'.r',label='HSV_value')\n ax3.plot(date,v,'.g',label='SV_value')\n ax3.set_ylabel('value')", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', 
wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot_curves():\n lm = np.arange(0, 1.8, .01)\n vm = np.arange(-1.2, 1.2, .01)\n lt = np.arange(0, 1.07, .01)\n plt.subplot(2,1,1)\n plt.plot(lm, force_length_muscle(lm), 'r')\n plt.plot(lm, force_length_parallel(lm), 'g')\n plt.plot(lt, force_length_tendon(lt), 'b')\n plt.legend(('CE', 'PE', 'SE'))\n plt.xlabel('Normalized length')\n plt.ylabel('Force scale factor')\n plt.subplot(2, 1, 2)\n plt.plot(vm, force_velocity_muscle(vm), 'k')\n plt.xlabel('Normalized muscle velocity')\n plt.ylabel('Force scale factor')\n plt.tight_layout()\n plt.show()", "def plot(self, ax=None, show=True, **kwargs):\n\n import matplotlib.pyplot as plt\n\n pvalues = self.pvalues()\n poivalues = self.poinull.value\n poiname = self.poinull.name\n alpha = self.alpha\n\n if ax is None:\n _, ax = 
plt.subplots(figsize=(10, 8))\n\n if self.CLs:\n cls_clr = \"r\"\n clsb_clr = \"b\"\n else:\n cls_clr = \"b\"\n clsb_clr = \"r\"\n\n ax.plot(poivalues, pvalues[\"cls\"], label=\"Observed CL$_{s}$\",\n marker=\".\", color='k', markerfacecolor=cls_clr,\n markeredgecolor=cls_clr, linewidth=2.0, ms=11)\n ax.plot(poivalues, pvalues[\"clsb\"], label=\"Observed CL$_{s+b}$\",\n marker=\".\", color='k', markerfacecolor=clsb_clr,\n markeredgecolor=clsb_clr, linewidth=2.0, ms=11,\n linestyle=\":\")\n ax.plot(poivalues, pvalues[\"clb\"], label=\"Observed CL$_{b}$\",\n marker=\".\", color='k', markerfacecolor=\"k\",\n markeredgecolor=\"k\", linewidth=2.0, ms=11)\n ax.plot(poivalues, pvalues[\"exp\"],\n label=\"Expected CL$_{s}-$Median\", color='k',\n linestyle=\"--\", linewidth=1.5, ms=10)\n ax.plot([poivalues[0], poivalues[-1]], [alpha, alpha], color='r',\n linestyle='-', linewidth=1.5)\n ax.fill_between(poivalues, pvalues[\"exp\"], pvalues[\"exp_p1\"],\n facecolor=\"lime\",\n label=\"Expected CL$_{s} \\\\pm 1 \\\\sigma$\")\n ax.fill_between(poivalues, pvalues[\"exp\"], pvalues[\"exp_m1\"],\n facecolor=\"lime\")\n ax.fill_between(poivalues, pvalues[\"exp_p1\"], pvalues[\"exp_p2\"],\n facecolor=\"yellow\",\n label=\"Expected CL$_{s} \\\\pm 2 \\\\sigma$\")\n ax.fill_between(poivalues, pvalues[\"exp_m1\"], pvalues[\"exp_m2\"],\n facecolor=\"yellow\")\n\n if self.CLs:\n ax.set_ylim(-0.01, 1.1)\n else:\n ax.set_ylim(-0.01, 0.55)\n ax.set_ylabel(\"p-value\")\n ax.set_xlabel(poiname)\n ax.legend(loc=\"best\", fontsize=14)\n\n if show:\n plt.show()", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def plot_tuning_curves(direction_rates, title):\r\n\r\n ax1 = plt.subplot(2,2,1)\r\n 
ax1.set_title(title)\r\n\r\n x = direction_rates[:, 0]\r\n y = direction_rates[:, 1]\r\n\r\n ax1.bar(x, y, width=45 )\r\n ax1.axis([0, 360, 0, max(y) + max(y)*0.1])\r\n ax1.set_ylabel('Firing Rate (spikes/s)')\r\n ax1.set_xlabel('Direction of Motions (degrees)')\r\n\r\n\r\n ax2 = plt.subplot(2, 2, 2, polar=True)\r\n ax2.set_title(title)\r\n spikescount = np.append(y, y[0])\r\n theta = np.arange(0, 361, 45)*np.pi/180\r\n ax2.plot(theta, spikescount, label='Firing Rate (spikes/s)')\r\n ax2.legend(loc=8)", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main routine for plotting a single roccurve
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None): # Get a default ax if none is given if ax is None: import matplotlib.pyplot as plt fig = plt.figure(figsize=(8,8)) ax = fig.gca() # Plot the base line ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray') # Plot the single roccurve line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax) line.set_label(bkgs[0].get_category()) # Plot settings ax.set_xlim(0.0, 1.05) ax.set_ylim(0.0, 1.05) ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE) ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE) ax.legend(fontsize=DEFAULT_FONTSIZE) return ax
[ "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def plot_ROC_zoom():\r\n \r\n fpr = dict()\r\n tpr = dict()\r\n threshold = dict()\r\n roc_auc = dict()\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n\r\n colors = ['aqua', 'red', 'royalblue']\r\n for i, color in zip(range(len(y_prob_list)), colors):\r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test_list[i], y_prob_list[i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n # plot ROC curve\r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n label='AUC %0.3f'%roc_auc[i]\r\n )\r\n\r\n plt.legend(loc='lower right', prop={'size': 13, 'weight': 'bold'})\r\n # plt.plot([0, 1], [0, 1], 'r--', linewidth=2)\r\n plt.xlim([-0.01, 0.20])\r\n plt.ylim([0.8, 1.01])\r\n plt.xticks([0, 0.05, 0.10, 0.15, 0.20], fontsize=14, fontweight='bold')\r\n plt.yticks([0.80, 0.85, 0.90, 0.95, 1.00], fontsize=14, fontweight='bold')\r\n ax.axhline(y=0.8, color='k', linewidth=3)\r\n ax.axhline(y=1.01, color='k', linewidth=3)\r\n ax.axvline(x=-0.01, color='k', linewidth=3)\r\n ax.axvline(x=0.20, color='k', linewidth=3)\r\n\r\n plt.ylabel('True Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xlabel('False Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xticks(fontsize=14, fontweight='bold')\r\n plt.yticks(fontsize=14, fontweight='bold')\r\n ax.tick_params(direction='out', length=6, width=2, colors='k',\r\n grid_color='k', grid_alpha=0.5)\r\n\r\n plt.grid(True)\r\n plt.show()\r\n plt.savefig(os.path.join(result_dir, 'ROC_exvivo_2.png'), format='png', dpi=600)\r\n plt.close()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_roc_curve(test_y, P_base_learners, P_ensemble, labels, ens_label):\n plt.figure(figsize=(10, 8))\n plt.plot([0, 1], [0, 1], 'k--')\n \n cm = [plt.cm.rainbow(i)\n for i in np.linspace(0, 1.0, P_base_learners.shape[1] + 1)]\n \n for i in range(P_base_learners.shape[1]):\n p = P_base_learners[:, i]\n fpr, tpr, _ = roc_curve(test_y, p)\n plt.plot(fpr, tpr, label=labels[i], c=cm[i + 1])\n\n fpr, tpr, _ = roc_curve(test_y, P_ensemble)\n plt.plot(fpr, tpr, label=ens_label, c=cm[0]) \n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend(frameon=False)\n plt.savefig('G:/Cardiac/Roc_curve.jpg')\n plt.show()", "def plot_roc_single(fpr, tpr, roc_auc, nclass):\n plt.figure()\n lw = 2\n plt.plot(fpr[nclass], tpr[nclass], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[nclass])\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True 
Positive Rate')\n plt.title('Receiver operating characteristic example')\n plt.legend(loc=\"lower right\")\n plt.show()", "def plot_ROC(self):\n\n fpr, tpr, thresholds = roc_curve(self.targets, self.outputs)\n plt.plot(fpr, tpr)\n plt.axis([0, 1, 0, 1])\n plt.xlabel('FP Rate')\n plt.ylabel('TP Rate')\n plt.show()", "def orb_pos_plot(r1,r2,t):\n plt.figure(figsize=(10,8))\n plt.plot(t, r1, label = r'$\\alpha$ Cen A')\n plt.plot(t, r2, label = r'$\\alpha$ Cen A')\n plt.xlim()\n plt.ylim()\n plt.title('Position Vector Curve', fontsize=20)\n plt.ylabel(\"Position Vector, r (in AU)\", fontsize=18)\n plt.xlabel(\"Time (in years)\", fontsize=18)\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16) \n plt.legend(fontsize=16)\n #plt.grid()\n plt.show()", "def plotRocCurve(fpr, tpr, roc_auc):\n \n fig, axs = plt.subplots(11, 2, sharex=True, sharey=True, figsize=(20, 150))\n\n row_idx = 0\n col_idx = 0\n\n for annotator in fpr:\n lw = 2\n\n axs[row_idx, col_idx].plot(fpr[annotator], tpr[annotator], color='darkorange',\n lw=lw, label='ROC curve (area = %0.4f)' % roc_auc[annotator])\n axs[row_idx, col_idx].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n axs[row_idx, col_idx].legend(loc=\"lower right\")\n axs[row_idx, col_idx].set_title(annotator)\n\n if col_idx == 0:\n col_idx += 1\n else:\n col_idx = 0 \n row_idx += 1\n \n plt.show()", "def roq_plot(\n pressure,\n roq_points,\n minimum,\n maximum,\n p_monolayer,\n roq_monolayer,\n ax=None\n):\n # Generate the figure if needed\n if ax is None:\n _, ax = plt.pyplot.subplots(figsize=(6, 4))\n\n ax.plot(\n pressure,\n roq_points,\n label='all points',\n **POINTS_ALL_STYLE,\n )\n ax.plot(\n pressure[minimum:maximum],\n roq_points[minimum:maximum],\n label='chosen points',\n **POINTS_SEL_STYLE,\n )\n ax.plot(\n p_monolayer,\n roq_monolayer,\n marker='X',\n markersize=10,\n linestyle='',\n color='k',\n label='monolayer point'\n )\n ax.set_title(\"Rouquerol plot\")\n ax.set_xlabel('p/p°', fontsize=15)\n ax.set_ylabel('$n ( 1 - p/p°)$', fontsize=10)\n ax.legend(loc='best')\n\n return ax", "def test5():\n\t######### input ########\n\tr0 = 0.\n\tr1 = 1.\n\teps = 1e-9\n\tnpoints = 500\n\t########################\n\tprint \"\\nTEST 5\"\n\trvals = rspace0(r0,r1,eps,npoints)\n\tprint \"rvals = \", rvals\n\tplt.plot(rvals,'kx')\n\tplt.show()\n\t## end\n\tprint \"END TEST 5\\n\"", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', 
fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plotPRCurveForClass(self, classname, visualize=True):\n p, r, t = self.getPRCurveForClass(classname)\n self.data = {\"precisions\": p, \"recalls\": r, \"threshold_steps\": t}\n \n self.annot.set_visible(False) \n if visualize:\n self.plotCurve(self.data)\n self.figure.canvas.mpl_connect(\"motion_notify_event\", self.hover)\n # time.sleep(0.5)\n self.figure.canvas.flush_events()\n else:\n self.plotCurve(self.data)\n self.figure.canvas.flush_events()\n plt.savefig(f\"PR_Curve_{classname}.png\")", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def pr_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import precision_recall_curve\n if train==True:\n ypredTrain = model.predict(X_train) \n precisions, recalls, thresholds = precision_recall_curve(y_train, ypredTrain)\n plt.plot(precisions, recalls, linewidth=3, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Train)\\n\", size=14) \n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n precisions, recalls, thresholds = precision_recall_curve(y_test, ypredTest)\n plt.plot(precisions, recalls, linewidth=3, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Test)\\n\", size=14)\n plt.show()", "def plot_roc(training_history, fig_path, mtype=\"train\"):\r\n for i in range(len(training_history)):\r\n auc = training_history[i][mtype + \"_auc\"][-1]\r\n fpr = training_history[i][mtype + \"_fpr\"][-1]\r\n tpr = training_history[i][mtype + \"_tpr\"][-1]\r\n\r\n plt.plot(fpr, tpr, label=\"Fold-\" + str(i) + \" AUC = %0.2f\" % auc)\r\n\r\n plt.title(mtype + \" roc curve\")\r\n plt.legend(loc=\"lower right\")\r\n plt.plot([0, 1], [0, 1], \"r--\")\r\n plt.xlim([0, 1])\r\n plt.ylim([0, 1])\r\n plt.ylabel(\"True Positive Rate\")\r\n plt.xlabel(\"False Positive Rate\")\r\n plt.savefig(fig_path + mtype + \"-roc\" + \".png\", bbox_inches=\"tight\")\r\n plt.close()", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = 
pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf", "def test_plot_pr_curves(od_detection_eval, od_detection_mask_eval):\n plot_pr_curves(od_detection_eval)\n plot_pr_curves(od_detection_mask_eval)", "def PlotRocs(self):\n fpr = []\n tpr = []\n for model in self.Models:\n fpr_tmp, tpr_tmp, thresholds = metrics.roc_curve(self.data.y_test, model.predict(self.data.X_test))\n fpr.append(fpr_tmp)\n tpr.append(tpr_tmp)\n plt.close\n for i, method in enumerate(self.method_name):\n print('AUC of method ' + method + ' : ', metrics.auc(fpr[i], tpr[i]))\n plt.plot(tpr[i], 1 - fpr[i], label=method)\n plt.legend()\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
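The `plot_single_roccurve` document above layers a dashed diagonal baseline, a single ROC line, fixed axis limits and a legend on one matplotlib axis. A minimal self-contained sketch of that plotting pattern follows; the toy `eff_sig`/`eff_bkg` arrays, the category label and the saved filename are illustrative stand-ins for the package's `Dataset`/`cut_function` machinery, which is not reproduced here.

```python
import numpy as np
import matplotlib.pyplot as plt

# Toy efficiencies standing in for roccurve(signals, bkgs, cut_function, cut_values):
# one (signal eff., bkg eff.) pair per cut value.
cut_values = np.linspace(0.0, 1.0, 21)
eff_sig = 1.0 - cut_values ** 2      # signal survives the cut longer
eff_bkg = 1.0 - cut_values ** 0.5    # background is rejected faster

fig = plt.figure(figsize=(8, 8))
ax = fig.gca()

# Dashed diagonal: the no-discrimination baseline
ax.plot([0.0, 1.0], [0.0, 1.0], linestyle='--', color='gray')

# The ROC line itself: bkg eff. on x, signal eff. on y
line, = ax.plot(eff_bkg, eff_sig, marker='o')
line.set_label('some bkg category')

ax.set_xlim(0.0, 1.05)
ax.set_ylim(0.0, 1.05)
ax.set_xlabel('Bkg eff.', fontsize=18)
ax.set_ylabel('Signal eff.', fontsize=18)
ax.legend(fontsize=18)
plt.savefig('single_roccurve_sketch.png')
```

Plotting signal efficiency against background efficiency this way keeps any useful cut above the dashed diagonal, which is why the baseline is drawn first.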
Plots the roccurve per background category. Assumes signals are all datasets of the same signal.
def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None): # Get a default ax if none is given if ax is None: import matplotlib.pyplot as plt fig = plt.figure(figsize=(8,8)) ax = fig.gca() # Get signal efficieny once eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values) # Perform some basic plotting setup ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray') ax.set_xlim(0.0, 1.05) ax.set_ylim(0.0, 1.05) ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE) ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE) ax.legend(fontsize=DEFAULT_FONTSIZE) # Then efficiencies per bkg category (ttjets, qcd, ...) bkg_categories = list(set([ b.get_category() for b in bkgs ])) bkg_categories.sort() lines = {} for bkg_cat in bkg_categories: # Get Datasets that have this category bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ] # Compute efficiency in this category eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values) # Draw roccurve for this category line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax) line.set_label(bkg_cat) # Save this line in a dict for potential outputting/modifying lines[bkg_cat] = line return ax
[ "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def plot_background(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))", "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, 
n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def plot(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))", "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()", "def plot_ROC_zoom():\r\n \r\n fpr = dict()\r\n tpr = dict()\r\n threshold = dict()\r\n roc_auc = dict()\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n\r\n colors = ['aqua', 'red', 'royalblue']\r\n for i, color in zip(range(len(y_prob_list)), colors):\r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test_list[i], y_prob_list[i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n # plot ROC curve\r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n label='AUC %0.3f'%roc_auc[i]\r\n )\r\n\r\n plt.legend(loc='lower right', prop={'size': 13, 'weight': 'bold'})\r\n # plt.plot([0, 1], [0, 1], 'r--', linewidth=2)\r\n plt.xlim([-0.01, 0.20])\r\n plt.ylim([0.8, 1.01])\r\n plt.xticks([0, 0.05, 0.10, 0.15, 0.20], fontsize=14, fontweight='bold')\r\n plt.yticks([0.80, 0.85, 0.90, 0.95, 1.00], fontsize=14, fontweight='bold')\r\n ax.axhline(y=0.8, color='k', linewidth=3)\r\n ax.axhline(y=1.01, color='k', linewidth=3)\r\n ax.axvline(x=-0.01, color='k', linewidth=3)\r\n ax.axvline(x=0.20, color='k', linewidth=3)\r\n\r\n plt.ylabel('True Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xlabel('False Positive Rate', fontweight='bold', fontsize=14)\r\n plt.xticks(fontsize=14, fontweight='bold')\r\n plt.yticks(fontsize=14, fontweight='bold')\r\n ax.tick_params(direction='out', length=6, width=2, colors='k',\r\n grid_color='k', grid_alpha=0.5)\r\n\r\n plt.grid(True)\r\n plt.show()\r\n plt.savefig(os.path.join(result_dir, 'ROC_exvivo_2.png'), format='png', dpi=600)\r\n plt.close()", "def 
Make_ROC_Curves(title,Signal_title,Background_title,diff_ran,ratio_ran,ratio_bins,pT_cut):\n\tdiff_bins = diff_ran[1]-diff_ran[0]\n\tSignal_ZeroDiv = np.loadtxt(\"histogram_files/{}_ZeroDiv.csv\".format(Signal_title),delimiter=',')\n\tSignal_file = \t\t\trt.TFile(\"histogram_files/{}_histograms.root\".format(Signal_title),\"READ\")\n\tSignal_Diff_eff = \t\tGet_ROC_Efficiencies(Signal_file.Get(\"L4-L1\"),diff_ran,diff_bins,0)\n\tSignal_Diff_eff_jet_pT = \tGet_ROC_Efficiencies(Signal_file.Get(\"L4-L1_pTJ\"),diff_ran,diff_bins,0)\n\tSignal_Ratio_eff = \t\tGet_ROC_Efficiencies(Signal_file.Get(\"L4_L1\"),ratio_ran,ratio_bins,Signal_ZeroDiv[0])\n\tSignal_Ratio_eff_jet_pT = \tGet_ROC_Efficiencies(Signal_file.Get(\"L4_L1_pTJ\"),ratio_ran,ratio_bins,Signal_ZeroDiv[2])\n\tSignal_CSV_eff = \t\tGet_ROC_Efficiencies(Signal_file.Get(\"CSV\"),(0,1),ratio_bins,0)\n\tSignal_CSV_eff_jet_pT = \tGet_ROC_Efficiencies(Signal_file.Get(\"CSV_pTJ\"),(0,1),ratio_bins,0)\n\n\tBackground_ZeroDiv = np.loadtxt(\"histogram_files/{}_ZeroDiv.csv\".format(Background_title),delimiter=',')\n\tBackground_file = \t\trt.TFile(\"histogram_files/{}_histograms.root\".format(Background_title),\"READ\")\n\t#print \"L4-L1\"\n\t#Background_Diff_eff = \t\tGet_ROC_Efficiencies(Background_file.Get(\"L4-L1\"),diff_ran,diff_bins,0,print_cut=True)\n\t#print \"L4-L1_pTH\"\n\t#Background_Diff_eff_hadron_pT = Get_ROC_Efficiencies(Background_file.Get(\"L4-L1_pTH\"),diff_ran,diff_bins,0,print_cut=True)\n\t#print \"L4-L1_pTJ\"\n\t#Background_Diff_eff_jet_pT = \tGet_ROC_Efficiencies(Background_file.Get(\"L4-L1_pTJ\"),diff_ran,diff_bins,0,print_cut=True)\n\tprint \"L4_L1\"\n\tBackground_Ratio_eff = \t\tGet_ROC_Efficiencies(Background_file.Get(\"L4_L1\"),ratio_ran,ratio_bins,Background_ZeroDiv[0],print_cut=True)\n\t#print \"L4_L1_pTH\"\n\t#Background_Ratio_eff_hadron_pT=\tGet_ROC_Efficiencies(Background_file.Get(\"L4_L1_pTH\"),ratio_ran,ratio_bins,Background_ZeroDiv[1],print_cut=True)\n\tprint \"L4_L1_pTJ\"\n\tBackground_Ratio_eff_jet_pT = \tGet_ROC_Efficiencies(Background_file.Get(\"L4_L1_pTJ\"),ratio_ran,ratio_bins,Background_ZeroDiv[2],print_cut=True)\n\tprint \"CSV\"\n\tBackground_CSV_eff = \t\tGet_ROC_Efficiencies(Background_file.Get(\"CSV\"),(0,1),ratio_bins,0,print_cut=True)\n #print \"CSV_pTH\"\n\t#Background_CSV_eff_hadron_pT =\tGet_ROC_Efficiencies(Background_file.Get(\"CSV_pTH\"),(0,1),ratio_bins,0,print_cut=True)\n\tprint \"CSV_pTJ\"\n\tBackground_CSV_eff_jet_pT = \tGet_ROC_Efficiencies(Background_file.Get(\"CSV_pTJ\"),(0,1),ratio_bins,0,print_cut=True)\n\tplt.figure(\"ROC\")\n\tplt.clf()\n\t#plt.plot(Signal_Diff_eff,1-Background_Diff_eff,'r-',label='L4-L1')\n\t#plt.plot(Signal_Diff_eff_hadron_pT,1-Background_Diff_eff_hadron_pT,'r-.',label='L4-L1_pTH'+str(pT_cut))\n\t#plt.plot(Signal_Diff_eff_jet_pT,1-Background_Diff_eff_jet_pT,'r--',label='L4-L1_pTJ'+str(pT_cut))\n\tplt.plot(Signal_Ratio_eff,1-Background_Ratio_eff,'b-',label='L4_L1')\n\t#plt.plot(Signal_Ratio_eff_hadron_pT,1-Background_Ratio_eff_hadron_pT,'b-.',label='L4_L1_pTH'+str(pT_cut))\n\tplt.plot(Signal_Ratio_eff_jet_pT,1-Background_Ratio_eff_jet_pT,'b--',label='L4_L1_pTJ'+str(pT_cut))\n\tplt.plot(Signal_CSV_eff,1-Background_CSV_eff,'g-',label='CSV')\n #plt.plot(Signal_CSV_eff_hadron_pT,1-Background_CSV_eff_hadron_pT,'g-.',label='CSV_pTH'+str(pT_cut))\n plt.plot(Signal_CSV_eff_jet_pT,1-Background_CSV_eff_jet_pT,'g--',label='CSV_pTJ'+str(pT_cut))\n\tplt.plot([0,1],[0.9,0.9],'k:',label=\"10% 
mistag\")\n\tplt.xlabel(r\"$\\epsilon$_signal\")\n\tplt.ylabel(r\"1-$\\epsilon$_background\")\n\tplt.title(\"ROC-Curves\")\n\tplt.legend(loc=3)\n\tplt.savefig(\"ROC/{}_ROC_Curves.png\".format(title))\n\tplt.figure(\"Log_ROC\")\n\tplt.clf()\n\t#plt.semilogy(Signal_Diff_eff,Background_Diff_eff,'r-',label='L4-L1')\n\t#plt.semilogy(Signal_Diff_eff_hadron_pT,Background_Diff_eff_hadron_pT,'r-.',label='L4-L1_pTH'+str(pT_cut))\n\t#plt.semilogy(Signal_Diff_eff_jet_pT,Background_Diff_eff_jet_pT,'r--',label='L4-L1_pTJ'+str(pT_cut))\n\tplt.semilogy(Signal_Ratio_eff,Background_Ratio_eff,'b-',label='L4_L1')\n\t#plt.semilogy(Signal_Ratio_eff_hadron_pT,Background_Ratio_eff_hadron_pT,'b-.',label='L4_L1_pTH'+str(pT_cut))\n\tplt.semilogy(Signal_Ratio_eff_jet_pT,Background_Ratio_eff_jet_pT,'b--',label='L4_L1_pTJ'+str(pT_cut))\n\tplt.semilogy(Signal_CSV_eff,Background_CSV_eff,'g-',label='CSV')\n #plt.semilogy(Signal_CSV_eff_hadron_pT,Background_CSV_eff_hadron_pT,'g-.',label='CSV_pTH'+str(pT_cut))\n plt.semilogy(Signal_CSV_eff_jet_pT,Background_CSV_eff_jet_pT,'g--',label='CSV_pTJ'+str(pT_cut))\n\tplt.semilogy([0,1],[0.1,0.1],'k:',label=\"10% mistag\")\n\tplt.xlabel(r\"$\\epsilon$_signal\")\n\tplt.ylabel(r\"$\\epsilon$_background\")\n\tplt.title(\"ROC-Curves_log\")\n\tplt.legend(loc=4)\n\tplt.savefig(\"ROC/{}_ROC_Curves_log.png\".format(title))\n\tplt.show()", "def spiderplot(categories, values, ax=None,\n axfc = None,\n lcolor=\"k\", lsize=\"small\", \n rcolor=\"0.7\", rsize=\"small\", rarray=None,\n title=None, titlecolor=\"k\", titlesize=\"medium\",\n fillcolor = \"C0\", fillalpha=0.1, \n highlight_unique=True,\n highlight_color=\"C0\", \n **kwargs):\n import matplotlib.pyplot as mpl\n \n if highlight_unique:\n flagnonzero = np.asarray(values)>0 \n highlight = np.argwhere(flagnonzero)[0] if np.sum(flagnonzero) == 1 else None\n lcolor = \"0.5\"\n else:\n highlight = None\n \n # But we need to repeat the first value to close the circular graph:\n values = list(values)\n values += values[:1]\n ncategories = len(categories)\n \n \n # == Plot\n if ax is None:\n fig = mpl.figure(figsize=[3,3.5])\n ax = fig.add_axes([0.1,0.12,0.8,0.7], polar=True, \n facecolor=axfc,\n zorder=1)\n else:\n ax = ax\n fig = ax.figure\n\n # What will be the angle of each axis in the plot? 
(we divide the plot / number of variable)\n angles = [n / float(ncategories) * 2 * np.pi for n in range(ncategories)]\n angles += angles[:1]\n \n # Draw one axe per variable + add labels labels yet\n ax.set_xticks(angles[:-1])\n ax.set_xticklabels(categories, color=lcolor, size=lsize)\n \n if highlight is not None and highlight_unique:\n xtick = ax.get_xticklabels()[highlight[0]]\n xtick.set_color(highlight_color)\n xtick.set_weight(\"bold\")\n xtick.set_size(xtick.get_size()*1.2)\n \n\n \n # Draw ylabels\n ax.set_rlabel_position(0)\n \n # Scaling\n if rarray is not None: \n ax.set_yticks(rarray[:-1])\n ax.set_ylim(0,rarray[-1])\n \n ax.set_yticklabels(np.asarray(ax.get_yticks(), dtype=\"str\"), \n color=rcolor, size=rsize)\n \n # --------------- #\n # Actual Plot #\n # --------------- #\n # Plot data\n prop = dict(linewidth=1.5, linestyle='solid', color=fillcolor)\n for k,v in kwargs.items():\n prop[k] = v\n # python 3 -> prop = {**dict(linewidth=1.5, linestyle='solid'), **kwarg}\n ax.plot(angles, values, **prop)\n \n # Fill area\n ax.fill(angles, values, fillcolor, alpha=fillalpha)\n \n # Additional Info\n # First entry\n if title is not None:\n ax.set_title(title, size=titlesize, color=titlecolor)\n \n return {\"ax\":ax, \"fig\":fig, \"highlight\":highlight}", "def plot_roc_curve(self, classification_reports):\n #create the plots\n if len(classification_reports) > 2:\n fig, ax = plt.subplots(ncols=2, nrows=int(len(classification_reports)/2) + len(classification_reports) % 2)\n #removes last ax if uneven:\n if (len(classification_reports) % 2) == 1:\n fig.delaxes(ax[int(len(ax) - 1), 1])\n for i in range(len(classification_reports)):\n ax[int(i/2), i%2].plot(classification_reports[i].fpr, classification_reports[i].tpr, \\\n label='%s (area = %0.2f)' % (classification_reports[i].label, classification_reports[i].roc_auc),\\\n color=Plotting.color2)\n # plots the f(x) = x line:\n ax[int(i/2), i%2].plot([0, 1], [0, 1], color=Plotting.color1)\n ax[int(i/2), i%2].set_xlim([0.0, 1.0])\n ax[int(i/2), i%2].set_ylim([0.0, 1.0])\n ax[int(i/2), i%2].set_title(classification_reports[i].label)\n ax[int(i/2), i%2].set_ylabel('True Positive Rate')\n ax[int(i/2), i%2].set_xlabel('False Positive Rate')\n ax[int(i/2), i%2].legend(loc=\"lower right\")\n\n elif len(classification_reports) == 2:\n fig, ax = plt.subplots(ncols=2, nrows=1)\n for i in range(len(classification_reports)):\n ax[i].plot(classification_reports[i].fpr, classification_reports[i].tpr, \\\n label='%s (area = %0.2f)' % (classification_reports[i].label, classification_reports[i].roc_auc),\\\n color=Plotting.color2)\n # plots the f(x) = x line:\n ax[i].plot([0, 1], [0, 1], color=Plotting.color1)\n ax[i].set_xlim([0.0, 1.0])\n ax[i].set_ylim([0.0, 1.0])\n ax[i].set_title(classification_reports[i].label)\n ax[i].set_ylabel('True Positive Rate')\n ax[i].set_xlabel('False Positive Rate')\n ax[i].legend(loc=\"lower right\")\n\n elif len(classification_reports) == 1:\n fig, ax = plt.subplots(ncols=1, nrows=1)\n ax.plot(classification_reports[0].fpr, classification_reports[0].tpr, \\\n label='%s (area = %0.2f)' % (classification_reports[0].label, classification_reports[0].roc_auc),\\\n color=Plotting.color2)\n # plots the f(x) = x line:\n ax.plot([0, 1], [0, 1], color=Plotting.color1)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.0])\n ax.set_title(classification_reports[0].label)\n ax.set_ylabel('True Positive Rate')\n ax.set_xlabel('False Positive Rate')\n ax.legend(loc=\"lower right\")\n\n plt.suptitle('Receiver operating characteristic')\n 
plt.tight_layout()\n plt.show()\n return", "def radarplots(self):\n fig1 = go.Figure()\n \n features = self.data.columns[:3]\n groupings = self.data.groupby('label').mean()\n names = ['group{}'.format(i+1) for i in range(self.num_clusters)]\n \n for f in features: \n fig1.add_trace(go.Scatterpolar(\n r=groupings[f],\n theta=names,\n fill='toself', \n name=f\n ))\n \n fig1.show()\n\n fig2 = go.Figure()\n \n groupings_t = groupings.T\n \n for g in range(self.num_clusters): \n fig2.add_trace(go.Scatterpolar(\n r=groupings_t.iloc[:, g], \n theta=features, \n fill='toself', \n name='group{}'.format(g)\n ))\n \n fig2.show()", "def drawCloudSpectra(self,TB,coPath):\n fig = plt.figure(figsize=(12, 12))\n # fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,6),sharex=True)\n rc('text', usetex=True)\n rc('font', **{'family': 'sans-serif', 'size': 18, 'serif': ['Helvetica']})\n\n\n\n for eachRow in TB:\n\n rawCOName= eachRow[self.tbNameCol]\n\n rawCOFITS = os.path.join( coPath , rawCOName[0:9]+\"_C.fits\" )\n\n drawL,drawB = eachRow[\"x_cen\"] , eachRow[\"y_cen\"]\n\n Radius=30./3600.*1\n\n\n lRangeSpec = [ drawL - Radius, drawL + Radius ]\n bRangeSpec = [ drawB - Radius, drawB + Radius ]\n\n averageSpec,Vs= doFITS.getAverageSpecByLBrange(rawCOFITS, lRange = lRangeSpec ,bRange= bRangeSpec )\n spectralMean=0\n try:\n spectralMean=np.nanmean( averageSpec[ int(eachRow[\"peakV\"])-50:int(eachRow[\"peakV\"]) -10 ] )\n except:\n pass\n rms=np.sqrt( np.mean( np.square(averageSpec[averageSpec<= spectralMean ]) ) )\n #ax.axhline(Vs, averageSpec,color='blue',where='mid',lw=0.8 ,zorder=2, label= labelStr )\n\n ######get goo channels\n\n cutSpec= averageSpec[ int(eachRow[\"peakV\"])-10:int(eachRow[\"peakV\"])+10 ]\n\n SNR3Pix=cutSpec[cutSpec>= spectralMean+rms*3 ]\n\n\n if len( SNR3Pix )<=2 :\n #insufficient dispersion\n\n\n continue\n\n if eachRow[\"peakV\"] >6800 and eachRow[\"peakV\"]<6900:\n continue\n\n\n ax = fig.add_subplot(2, 1, 1)\n ax2 = fig.add_subplot(2, 1, 2, sharex=ax)\n\n ax.axhline(y=spectralMean+rms*3, ls=\"--\", color='black', lw=0.8)\n\n ax.plot([eachRow[\"v_cen\"], eachRow[\"v_cen\"]], [-0.5, np.nanmax(averageSpec) * 1.2], color='red', lw=1.5,\n zorder=1)\n ax2.plot([eachRow[\"v_cen\"], eachRow[\"v_cen\"]], [-0.5, 5], color='red', lw=1.5, zorder=1)\n\n peakL, peakB ,peakV= eachRow[\"peakL\"], eachRow[\"peakB\"], eachRow[\"peakV\"]\n\n labelStr = \"peak index(L,B,V): ({}, {}, {} )\".format(int(peakL), int(peakB), int(peakV) )\n\n ax.step(Vs, averageSpec, color='blue', where='mid', lw=0.8, zorder=2, label=labelStr)\n\n\n ax.axhline(y= spectralMean , ls=\"-\", color='black', lw=0.8)\n ax2.scatter(TB[\"v_cen\"], TB[\"peak\"],color='green' ,s=5 )\n ax.set_xlim(eachRow[\"v_cen\"]-20, eachRow[\"v_cen\"]+20 )\n\n\n ax.legend(loc=1,handlelength=0.5)\n saveFigname= rawCOName[0:9]+\"{}.png\".format( eachRow[\"_idx\"] )\n\n saveFigname= os.path.join( self.figurePath, saveFigname )\n\n ax.set_xlabel(\"Radial velocity\")\n ax.set_ylabel(r\"Average T$_{\\rm mb}$ (5$\\times$5)\")\n ax2.set_ylabel(r\"Peak T$_{\\rm mb}$\")\n\n\n plt.savefig( saveFigname , bbox_inches='tight',dpi=100)\n\n ax.cla()\n ax2.cla()", "def plot_roc_curve(test_y, P_base_learners, P_ensemble, labels, ens_label):\n plt.figure(figsize=(10, 8))\n plt.plot([0, 1], [0, 1], 'k--')\n \n cm = [plt.cm.rainbow(i)\n for i in np.linspace(0, 1.0, P_base_learners.shape[1] + 1)]\n \n for i in range(P_base_learners.shape[1]):\n p = P_base_learners[:, i]\n fpr, tpr, _ = roc_curve(test_y, p)\n plt.plot(fpr, tpr, label=labels[i], c=cm[i + 1])\n\n fpr, 
tpr, _ = roc_curve(test_y, P_ensemble)\n plt.plot(fpr, tpr, label=ens_label, c=cm[0]) \n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend(frameon=False)\n plt.savefig('G:/Cardiac/Roc_curve.jpg')\n plt.show()", "def _configure_plot(self):\r\n self.plt.setBackground(background=None)\r\n self.plt.setAntialiasing(True)\r\n self._plt = self.plt.addPlot(row=1, col=1)\r\n self._plt.setLabel('bottom', \"Tiempo\", \"s\")\r\n self._plt.setLabel('left', \"Amplitud\", \"Volt\")\r\n self._plt.showGrid(x=False, y=True)\r\n\r\n self.plt_2.setBackground(background=None)\r\n self.plt_2.setAntialiasing(True)\r\n self._plt_2 = self.plt_2.addPlot(row=1, col=1)\r\n self._plt_2.setLabel('bottom', \"Tiempo\", \"s\")\r\n self._plt_2.setLabel('left', \"Amplitud\", \"Volt\")\r\n self._plt_2.showGrid(x=False, y=False)\r\n\r\n self.Rplt.setBackground(background=None)\r\n self.Rplt.setAntialiasing(True)\r\n self._Rplt = self.Rplt.addPlot(row=1, col=1)\r\n self._Rplt.setLabel('bottom', \"R-value\")\r\n self._Rplt.setLabel('left', \"%SpO2\")\r\n self._Rplt.showGrid(x=False, y=True)\r\n\r\n self.Rplt_2.setBackground(background=None)\r\n self.Rplt_2.setAntialiasing(True)\r\n self._Rplt_2 = self.Rplt_2.addPlot(row=1, col=1)\r\n self._Rplt_2.setLabel('bottom', \"Longitud de Onda\", \"*nm\")\r\n self._Rplt_2.setLabel('left', \"Absorbancia\")\r\n self._Rplt_2.showGrid(x=False, y=True)", "def plot_roc(training_history, fig_path, mtype=\"train\"):\r\n for i in range(len(training_history)):\r\n auc = training_history[i][mtype + \"_auc\"][-1]\r\n fpr = training_history[i][mtype + \"_fpr\"][-1]\r\n tpr = training_history[i][mtype + \"_tpr\"][-1]\r\n\r\n plt.plot(fpr, tpr, label=\"Fold-\" + str(i) + \" AUC = %0.2f\" % auc)\r\n\r\n plt.title(mtype + \" roc curve\")\r\n plt.legend(loc=\"lower right\")\r\n plt.plot([0, 1], [0, 1], \"r--\")\r\n plt.xlim([0, 1])\r\n plt.ylim([0, 1])\r\n plt.ylabel(\"True Positive Rate\")\r\n plt.xlabel(\"False Positive Rate\")\r\n plt.savefig(fig_path + mtype + \"-roc\" + \".png\", bbox_inches=\"tight\")\r\n plt.close()", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n 
plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def _multicolor_plot(self):\n\n points = self._curve.data[:, np.newaxis]\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n curve_values = list(self._curve.values())\n multicolor_values = getattr(self._curve, self._param)\n\n marker = self._kwargs.pop('marker', None)\n\n if self._curve.is2d:\n line_collection = LineCollection\n else:\n line_collection = Line3DCollection\n\n norm = Normalize(multicolor_values.min(), multicolor_values.max())\n mappable = ScalarMappable(norm=norm, cmap=self._param_cmap)\n colors = mappable.to_rgba(multicolor_values)\n\n curve_segments = line_collection(segments, zorder=0, **self._kwargs)\n curve_segments.set_array(multicolor_values)\n curve_segments.set_norm(norm)\n curve_segments.set_cmap(self._param_cmap)\n\n if self._curve.is2d:\n self._axes.add_collection(curve_segments)\n self._axes.autoscale()\n else:\n self._axes.add_collection3d(curve_segments)\n self._axes.auto_scale_xyz(*curve_values)\n\n if marker:\n self._axes.scatter(*curve_values, c=colors, marker=marker, **self._kwargs)\n\n cbar = self._axes.figure.colorbar(mappable, ax=self._axes)\n cbar.ax.set_ylabel(self._param)", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
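For the per-category routine in the preceding record, the distinguishing steps are: compute the signal efficiency once, loop over background categories, draw one labelled line per category, and keep the line handles in a dict. Below is a hedged sketch of that loop using made-up discriminator scores; the `qcd`/`ttjets`/`wjets` names and the beta-distributed scores are assumptions, not values taken from the dataset.

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
cut_values = np.linspace(0.0, 1.0, 51)

def eff_above_cut(scores, cuts):
    """Fraction of events whose score exceeds each cut value."""
    return np.array([(scores > c).mean() for c in cuts])

# Toy discriminator scores: one signal sample, several background categories
sig_scores = rng.beta(5, 2, size=10_000)
bkg_scores_per_cat = {
    'qcd':    rng.beta(2, 5, size=10_000),
    'ttjets': rng.beta(3, 4, size=10_000),
    'wjets':  rng.beta(2, 4, size=10_000),
}

fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
ax.plot([0.0, 1.0], [0.0, 1.0], linestyle='--', color='gray')

# Signal efficiency is computed once and reused for every background category
eff_sig = eff_above_cut(sig_scores, cut_values)

lines = {}
for category in sorted(bkg_scores_per_cat):
    eff_bkg = eff_above_cut(bkg_scores_per_cat[category], cut_values)
    line, = ax.plot(eff_bkg, eff_sig)
    line.set_label(category)
    lines[category] = line   # keep the handles, as the original stores them per category

ax.set_xlim(0.0, 1.05)
ax.set_ylim(0.0, 1.05)
ax.set_xlabel('Bkg eff.')
ax.set_ylabel('Signal eff.')
ax.legend()   # called after the labelled lines exist, so every category shows up
plt.savefig('roccurves_per_bkg_sketch.png')
```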
Fills a coffea.hist.Hist for a single distribution. Takes a list of Dataset objects, and a function `get_array` that should return a numpy-like array when given an arrays object. Also requires a string `name` to know in which hist to fill it.
def hist_single_distribution( arrays_iterator, get_array, varname='somevar', vartitle=None, distrname='somedistr', distrtitle=None, hist=None, left=-1., right=1., nbins=50 ): if hist is None: import coffea.hist vartitle = varname if vartitle is None else vartitle hist = coffea.hist.Hist( "Count", coffea.hist.Bin(varname, vartitle, nbins, left, right), coffea.hist.Cat('label', varname), ) for arrays, dataset in arrays_iterator: print(dataset.get_weight(), get_array(arrays)) hist.fill(label=distrname, weight=dataset.get_weight(), **{varname: get_array(arrays)}) return hist
[ "def array2hist(array, hist_name='hist_name', binning=(10,0,100), errors=None):\n if array.size != binning[0]:\n raise ValueError('Array size must be number of bins!')\n padded = np.pad(array,(1,1),'constant')\n if array.dtype == np.float32:\n h = ROOT.TH1F(hist_name,hist_name,binning[0],binning[1],binning[2])\n elif array.dtype == np.float64:\n h = ROOT.TH1D(hist_name,hist_name,binning[0],binning[1],binning[2])\n else:\n raise TypeError('We can only handle np.float32 and np.float64')\n h.Set(padded.size, padded)\n h.SetEntries(array.size)\n if errors is not None:\n if errors.size != array.size:\n raise ValueError('Error is not the same size as the array')\n pe = np.pad(np.ascontiguousarray(errors, dtype=np.float64), (1,1), 'constant')\n h.SetError(pe)\n return h", "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def hist_aggregate(hist_name, hist_dim=1, norm=None, **hist_args):\n\tdef decorator(fn):\n\t\tdef _inner(vals, hist_collection):\n\t\t\tvals = fn(vals)\n\n\t\t\t# we want to be able to handle dicts\n\t\t\t# for the case where multiple instances of the \"same\" hist\n\t\t\t# separated by a selection (the dict key) are returned.\n\t\t\t# if that *isn't* what happened, turn it into a dict with a single key.\n\t\t\tif not isinstance(vals, dict):\n\t\t\t\tvals = {None: vals}\n\n\t\t\tfor subsample, vs in vals.items():\n\t\t\t\tfull_hist_name = \"%s_%s\" % (hist_name, subsample) if subsample else hist_name\n\t\t\t\tif hist_dim == 1:\n\t\t\t\t\thist, bins = numpy.histogram(vs, **hist_args)\n\t\t\t\telif hist_dim == 2:\n\t\t\t\t\tif len(vs) == 0:\n\t\t\t\t\t\treturn\n\t\t\t\t\thist, binsx, binsy = numpy.histogram2d(*vs, **hist_args)\n\t\t\t\t\tbins = (binsx, binsy)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unsupported histogram dimension: \" + str(hist_dim))\n\n\t\t\t\tif full_hist_name in hist_collection:\n\t\t\t\t\th = hist_collection[full_hist_name]\n\t\t\t\t\tif h.dim == 1:\n\t\t\t\t\t\tassert all(h.bins == bins)\n\t\t\t\t\telif h.dim == 2:\n\t\t\t\t\t\tassert all([numpy.array_equal(h.bins[i], bins[i]) for i in range(len(h.bins))])\n\t\t\t\t\thist_collection[full_hist_name].data += hist\n\t\t\t\telse:\n\t\t\t\t\th = Hist(dim=hist_dim, bins=bins, data=hist, norm=norm)\n\t\t\t\t\thist_collection[full_hist_name] = h\n\n\t\treturn _inner\n\n\treturn decorator", "def _get_hist(ndim):\n if ndim == 3:\n return hu.create_histogram(np.random.uniform(0, 1, (10000, 3)),\n (10, 0, 1, 20, 0, 1, 30, 0, 1))\n if ndim == 2:\n return hu.create_histogram(np.random.uniform(0, 1, (10000, 2)),\n (10, 0, 1, 20, 0, 1))\n if ndim == 1:\n return hu.create_histogram(np.random.uniform(0, 1, 10000),\n (10, 0, 1))\n if ndim >= 4:\n hist = r.THnD('hist4d', '', ndim, np.arange(2, ndim + 2, 1, dtype='i4'),\n np.zeros(ndim), np.ones(ndim))\n fill_vals = np.random.uniform(0, 1, (10000, ndim))\n for i in xrange(len(fill_vals)):\n hist.Fill(fill_vals[i, :])\n hist.Sumw2()\n return hist", "def addHistogram1D(self, name, title, n_bins, minimum, maximum):\n\t\tself.histograms[ name ] = ROOT.TH1F(name, title, n_bins, minimum, maximum)", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n 
self.vis.histogram(make_np(values), opts={'title': tag})", "def hist1d(arr, bins=None, amp_range=None, weights=None, color=None, show_stat=True, log=False, \\\n figsize=(6,5), axwin=(0.15, 0.12, 0.78, 0.80), \\\n title=None, xlabel=None, ylabel=None, titwin=None):\n #print 'hist1d: title=%s, size=%d' % (title, arr.size)\n if arr.size==0: return None, None, None\n fig = plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='w', frameon=True)\n if titwin is not None: fig.canvas.set_window_title(titwin)\n elif title is not None: fig.canvas.set_window_title(title)\n axhi = fig.add_axes(axwin)\n hbins = bins if bins is not None else 100\n hi = axhi.hist(arr.flatten(), bins=hbins, range=amp_range, weights=weights, color=color, log=log) #, log=logYIsOn)\n if amp_range is not None: axhi.set_xlim(amp_range) # axhi.set_autoscale_on(False) # suppress autoscailing\n if title is not None: axhi.set_title(title, color='k', fontsize=20)\n if xlabel is not None: axhi.set_xlabel(xlabel, fontsize=14)\n if ylabel is not None: axhi.set_ylabel(ylabel, fontsize=14)\n if show_stat:\n weights, bins, patches = hi\n add_stat_text(axhi, weights, bins)\n return fig, axhi, hi", "def autoHistogram1D(values, number_of_bins=None, range=None,\n weights=None, name=None, units=None):\n\n if len(values) == 0:\n raise ValueError, \"'values' is empty\"\n value_type = type(values[0])\n\n if number_of_bins is None:\n # Choose a reasonable number of bins.\n number_of_bins = min(100, max(10, len(values) / 10))\n\n if range is None:\n # Compute the actual range of values.\n lo, hi = hep.fn.minmax(None, values)\n if value_type in (int, long):\n # For integral types, expand the upper limit by one so the\n # largest value fits in the last bin.\n hi += 1\n # We also need to adjust the range so it is a multiple of\n # the number of bins.\n hi += number_of_bins - (hi - lo) % number_of_bins\n else:\n lo, hi = makeNiceRange(lo, hi)\n else:\n lo, hi = range\n # Cast the range to the appropriate type.\n lo, hi = map(value_type, (lo, hi))\n \n # Use 'float' bins if weights are given, 'int' otherwise.\n if weights is None:\n bin_type = int\n error_model = \"poisson\"\n else:\n bin_type = float\n error_model = \"symmetric\"\n\n # Construct the histogram.\n histogram = Histogram1D(number_of_bins, (lo, hi),\n bin_type=bin_type, error_model=error_model)\n if name is not None:\n histogram.axis.name = name\n if units is not None:\n histogram.axis.units = units\n # Fill the values into it.\n if weights is None:\n map(histogram.accumulate, values)\n else:\n for value, weight in zip(values, weights):\n histogram.accumulate(value, weight)\n\n return histogram", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def autoHistogram(values, numbers_of_bins=None, ranges=None,\n weights=None):\n\n if len(values) == 0:\n raise ValueError, \"'values' is empty\"\n # Determine the number of dimensions.\n dimensions = len(values[0])\n # Determine the types of the columns.\n value_types = [ type(c) for c in values[0] ]\n\n if numbers_of_bins is None:\n # Assume 'None' along each 
dimension.\n numbers_of_bins = dimensions * [None]\n else:\n numbers_of_bins = list(numbers_of_bins)\n if len(numbers_of_bins) != dimensions:\n raise ValueError, \"'numbers_of_bins' is wrong length\"\n # Assign default numbers of bins to dimensions for which it is not\n # specified. \n for i in range(dimensions):\n if numbers_of_bins[i] is None:\n # Choose a reasonable number of bins.\n numbers_of_bins[i] = 20\n\n if ranges is None:\n # Assume 'None' along each dimension\n ranges = dimensions * [None]\n else:\n ranges = list(ranges)\n if len(ranges) != dimensions:\n raise ValueError, \"'ranges' is wrong length\"\n # Determine range automatically along each dimension for which it is\n # not specified. \n for i in range(dimensions):\n if ranges[i] is None:\n # Compute the actual range of values.\n lo, hi = hep.fn.minmax(None, [ v[i] for v in values ])\n if value_types[i] in (int, long):\n # For integral types, expand the upper limit by one so\n # the largest value fits in the last bin.\n hi += 1\n # We also need to adjust the range so it is a multiple\n # of the number of bins.\n hi += numbers_of_bins[i] - (hi - lo) % numbers_of_bins[i]\n else:\n lo, hi = makeNiceRange(lo, hi)\n else:\n lo, hi = ranges[i]\n # Cast the range to the appropriate type. \n ranges[i] = value_types[i](lo), value_types[i](hi)\n \n # Use 'float' bins if weights are given, 'int' otherwise.\n if weights is None:\n bin_type = int\n else:\n bin_type = float\n\n # Construct the histogram.\n histogram = Histogram(*zip(numbers_of_bins, ranges),\n **{ \"bin_type\": bin_type })\n # Fill the values into it.\n if weights is None:\n map(histogram.accumulate, values)\n else:\n for value, weight in zip(values, weights):\n histogram.accumulate(value, weight)\n\n return histogram", "def hist(bins, y, /, axis=0):\n if bins.ndim != 1:\n raise ValueError('Bins must be 1-dimensional.')\n\n with quack._ArrayContext(y, push_right=axis) as context:\n # Get flattened data\n y = context.data\n yhist = np.empty((y.shape[0], bins.size - 1))\n\n # Take histogram\n for k in range(y.shape[0]):\n yhist[k, :] = np.histogram(y[k, :], bins=bins)[0]\n\n # Replace data\n context.replace_data(yhist)\n\n # Return unflattened data\n return context.data", "def _get_hist_data(self,hists,data):\n try:\n for hist in hists:\n self._get_hist_data(hist,data)\n except TypeError:\n hist_dict = {\"name\" : hists.hist.GetName(),\"cut_labels\" : hists.cut_labels, \"use_for_eff\" : self.use_for_eff}\n data.append(hist_dict)\n return data", "def add_histogram_data(name, sample):\n global _histograms\n\n histogram = _find_histogram(name)\n if histogram is None:\n histogram = Histogram(name, 8)\n _histograms.append(histogram)\n\n histogram.add_data(sample)", "def collect_absolute_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901\n\n if tensor not in self.histogram_dict:\n # first time it uses num_bins to compute histogram.\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = 
np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n # increase the number of bins\n width = old_hist_edges[1] - old_hist_edges[0]\n # NOTE: np.arange may create an extra bin after the one containing temp_amax\n new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[: len(old_hist)] += old_hist\n self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))", "def databinning(datagenerated1, datagenerated2, datagenerated3, bins_list):\n yhist1, _ = np.histogram(a=datagenerated1, bins=bins_list)\n yhist2, _ = np.histogram(a=datagenerated2, bins=bins_list)\n yhist3, _ = np.histogram(a=datagenerated3, bins=bins_list)\n\n return yhist1, yhist2, yhist3", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def hist(self, num_bins=20, bin_range=None, adjoin=True, individually=True, **kwargs):\n def dynamic_hist(obj, **dynkwargs):\n if isinstance(obj, (NdOverlay, Overlay)):\n index = kwargs.get('index', 0)\n obj = obj.get(index)\n return obj.hist(num_bins=num_bins, bin_range=bin_range,\n adjoin=False, **kwargs)\n\n from ..util import Dynamic\n hist = Dynamic(self, streams=self.streams, link_inputs=False,\n operation=dynamic_hist)\n if adjoin:\n return self << hist\n else:\n return hist", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a cut function and tries to return a title for it
def get_title(fn): title = fn.name if hasattr(fn, 'name') else fn.__name__ title = title.replace('_cut_function','') suffix = [] # if 'JetsAK15_subleading_' in title: # suffix.append(r'$j^{\mathrm{AK15}}_{\mathrm{subl}}$') title = title.replace('JetsAK15_subleading_', '').replace('subleading_', '') if hasattr(fn, 'left'): suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, svjflatanalysis.utils.get_title('mt'), fn.right)) # Transform variable name to title stirng title = svjflatanalysis.utils.get_title(title) if hasattr(fn, 'operator'): title += ' ' + fn.operator + ' cut' # Add the suffix title += ' ' + ' '.join(suffix) return title
[ "def sub_case_title(self, arg_tc):\n return self.title", "def SubTitle(Text):\n pass", "def make_title(words):", "def get_title(title: str):\n return title", "def generate_finding_title(title):\n\treturn \"Trend Micro: {}\".format(title)", "def getTitle(test:str) -> str:\n return test[5:].strip()", "def title(self):\n name = self.concept_spec.name\n name = name[0].upper() + name[1:]\n return name.replace('_', ' ').replace('-', ' ')", "def get_title(concept):\n start_location = concept.find(TITLE_KEY) # Find the location of the Start of the title\n end_location = concept.find(DESCRIPTION_KEY) # Find the end of the title\n title = concept[start_location + len(TITLE_KEY): end_location-1] # The title will be between them!\n return title", "def title(mystr):\n print(mystr.title())", "def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]", "def title(value):\r\n title_word = lambda w: w if RE_UPPERCASE.search(w) else old_title(w)\r\n return re.sub('(\\S+)', lambda m: title_word(m.group(0)), value)", "def title(text):\n return str(text).replace(\"_\", \" \").title()", "def doTitle(bunch, text, env):\n return env[\"pagename\"]", "def test_getTitle(self):\n def checkNameAndTitle(name, titlesolution):\n title = self._nameClassifierBuilder._getTitle(name)\n self.assertEquals(titlesolution, title)\n\n checkNameAndTitle(\"Mrs. ldajfhgp\", \"Mrs\")\n checkNameAndTitle(\"dlsfajkMrdlkjaf\", \"Mr\")\n checkNameAndTitle(\"dagddgwdasJonkheer\", \"Jonkheer\")", "def get_desc(paste):\n return paste.title", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def print_title(title, outf):\n\n print(\"\\n\\n%s\" % title, file=outf)\n print(\"=\" * len(title), file=outf)\n print(\"\")\n\n return None", "def title(self,reg):\n return text.regex(self.soc,reg,'s')", "def format_title(text):\n text = text.strip()\n # if empty string, return \"\"\n if len(text) == 0:\n return text\n else:\n text = text.lower() # lower all char\n\n # Change to in single space format\n words = [word for word in text.strip().split(\" \") if len(word) >= 1]\n\n # Capitalize all words except function word\n words_new = list()\n for word in words:\n if word not in FUNCTION_WORD:\n word = word[0].upper() + word[1:]\n words_new.append(word)\n\n # Make sure first word always be capitalized\n words_new[0] = words_new[0][0].upper() + words_new[0][1:]\n\n return \" \".join(words_new)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Windows version of base.processInterrupt Note! This doesn't work terribly well with a lot of processes.
def processInterrupt(uPid): try: # pylint: disable=no-member win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid); #GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent #rc = GenerateConsoleCtrlEvent(1, uPid); #reporter.log('GenerateConsoleCtrlEvent -> %s' % (rc,)); fRc = True; except: reporter.logXcpt('uPid=%s' % (uPid,)); fRc = False; return fRc;
[ "def send_interrupt(process):\n logger.debug(\"Interrupting process {0} ...\".format(process))\n try:\n os.kill(process.pid, SIGINT)\n # os.kill(process.pid, SIGTERM)\n except OSError:\n pass # process cannot be killed\n except TypeError:\n pass # pid is incorrect type\n except UnboundLocalError:\n pass # 'process' is not defined\n except AttributeError:\n pass # Trying to kill \"None\"", "def print_process_interrupted(exc: \"KeyboardInterrupt\"):\n _print(f\"\\nInterrupted. {exc}\")", "def fdp_interrupt(debugger, command, exe_ctx, result, internal_dict):\n vm.interrupt()", "def interrupt_main():\n if _main:\n raise KeyboardInterrupt\n else:\n global _interrupt\n _interrupt = True", "def HandleKeyboardInterrupt(self):\n raise NotImplementedError()", "def interrupt(self):\n raise NotImplementedError", "def siginterrupt(sig, flag): # real signature unknown; restored from __doc__\n pass", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def processKill(uPid):\n return processTerminate(uPid);", "def interrupt(self):\n self.interrupt_tick_tocking = True", "def handle_ctrl_c():\r\n oldhook = sys.excepthook\r\n def newhook(exctype, value, traceback):\r\n if exctype == KeyboardInterrupt:\r\n log('\\nInterrupted.\\n')\r\n else:\r\n return oldhook(exctype, value, traceback)\r\n sys.excepthook = newhook", "def suppress_keyboard_interrupt_message():\n old_excepthook = sys.excepthook\n\n def new_hook(type, value, traceback):\n if type != KeyboardInterrupt:\n old_excepthook(type, value, traceback)\n else:\n pass\n\n sys.excepthook = new_hook", "def interrupt(self, dry_run=True, **tags):\n printer = lambda result: self.affected_steps_printer('interrupt', result, dry_run, self.stdout)\n self._call_trail_client_method('interrupt', printer, dry_run=dry_run, **tags)", "def setinterrupt(self, chr: int, /) -> None:", "def _terminate_process(p):\n try:\n p.terminate()\n except (OSError, IOError):\n pass", "def Interrupt(self):\n return self.interrupt;", "def ignore_keyboardinterrupts(func):\n def wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n pass\n return wrapper", "def cancel_exec_request(*args):\n return _ida_kernwin.cancel_exec_request(*args)", "def EndProcess(proc, code=0):\n win32process.TerminateProcess(proc, code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Posts a WM_CLOSE message to the specified thread.
def postThreadMesssageClose(uTid): fRc = False; try: win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member fRc = True; except: reporter.logXcpt('uTid=%s' % (uTid,)); return fRc;
[ "def postThreadMesssageQuit(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def terminate_thread(thread):\n if not thread.isAlive():\n return\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"No Existe el id de este hilo\")\n elif res > 1:\n \"\"\"\n si devuelve un número mayor que uno, estás en problemas, entonces\n llamas de nuevo con exc = NULL para revertir el efecto.\n \"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def window_closing():\r\n global sock\r\n if sock:\r\n disconnect()\r\n window.quit()", "def closeEvent(self, event):\n self._TryCloseThreads()\n event.accept()", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def exit(self):\n self._stopevent.set()\n self.subscriber.channel.close()\n\n threading.Thread.join(self)\n self.logger.debug(f\"Thread with callback {self.subscriber.callback} exited with success.\")", "def threaded_quit(self, arg):\n threading_list = threading.enumerate()\n mythread = threading.currentThread()\n for t in threading_list:\n if t != mythread:\n ctype_async_raise(t, SystemExit)\n pass\n pass\n raise SystemExit", "def bcp_goodbye(self, **kwargs):\n if self.config['mediacontroller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()", "def on_close(self, event):\r\n if self.thread is not None:\r\n self.thread.abort = True\r\n if self.tester is not None:\r\n try:\r\n self.tester.Close()\r\n except:\r\n pass\r\n self.close_debug_console()\r\n event.Skip()", "def stop(self, joinThread=True):\n self.server.disconnect(self.remote)\n\n if self._ws_server:\n self._ws_server.close()\n self._ws_server = None\n\n if not self.thread:\n return\n\n # asyncio.get_event_loop().stop()\n if joinThread:\n print('joining thread')\n self.thread.join()\n self.thread = None\n logger.debug('WebsocketServer thread stopped')", "def thread_shutdown(self):\n self.stop_thread = True", "def stopping_thread(self, threadname):\n\n # self.threads[threadname][0].stop()\n self.threads[threadname][1].quit()\n self.threads[threadname][1].wait()\n del self.threads[threadname]", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def exit_chat(self):\n 
self.close()\n try:\n self.zeromq_listener.running = False\n self.thread.terminate()\n cast(self.pid, (Atom(\"newmsg\"), \"--quit\"))\n except:\n pass", "def bcp_goodbye(self, **kwargs):\n if self.config['media_controller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()", "def kill(self, threadid):\n self.rpc.call(MsfRpcMethod.CoreThreadKill, [threadid])", "def CloseForum(self, event):\n pass", "def close(self):\n try:\n self.logger.info(\"function: MailClientThunderbirdVmmSide::close\")\n self.guest_obj.send(\"application \" + \"mailClientThunderbird \" + str(self.window_id) + \" close \")\n except Exception as e:\n raise Exception(\"error MailClientThunderbirdVmmSide:close()\" + str(e))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Posts a WM_QUIT message to the specified thread.
def postThreadMesssageQuit(uTid): fRc = False; try: win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member fRc = True; except: reporter.logXcpt('uTid=%s' % (uTid,)); return fRc;
[ "def postThreadMesssageClose(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def threaded_quit(self, arg):\n threading_list = threading.enumerate()\n mythread = threading.currentThread()\n for t in threading_list:\n if t != mythread:\n ctype_async_raise(t, SystemExit)\n pass\n pass\n raise SystemExit", "def quit(self):\r\n if self._thread is not None and self._thread.is_alive():\r\n self.write(b'\\x18\\n')", "def terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def terminate_thread(thread):\n if not thread.isAlive():\n return\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"No Existe el id de este hilo\")\n elif res > 1:\n \"\"\"\n si devuelve un número mayor que uno, estás en problemas, entonces\n llamas de nuevo con exc = NULL para revertir el efecto.\n \"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def nothread_quit(self, arg):\n\n self.debugger.core.stop()\n self.debugger.core.execution_status = \"Quit command\"\n raise SystemExit", "def quit(self, reason=''):\n reply = 'QUIT :%s' % (reason)\n self.send(reply)", "def quit(self):\n msg = QuitRequest()\n sendmsg(self.socket, msg)\n (rid, msg) = recvmsg(self.socket)\n if rid != MsgType.QuitResponse:\n print(\"Did not receive quit response from server\")\n self._close()", "def quit(self):\r\n self.shutdown()", "def cleanThread(self):\n logging.info(\"Clean Thread\")\n self.thread.quit()\n self.thread.wait()", "def quit(self):\n # Makes sure to send the right command to server\n if self.state == \"inqueue\":\n self.Send({\"action\": \"queueQUIT\", \"id\": self.serverID})\n elif self.state == \"ingame\":\n self.Send({\"action\": \"gameQUIT\", \"gameid\": self.gameid, \"num\": self.num, \"id\": self.serverID})\n else:\n self.Send({\"action\": \"QUIT\", \"id\": self.serverID})\n\n connection.Pump()\n self.Pump()\n pygame.quit()\n sys.exit(0)", "def quit_cmd(self):\n print_debug(\"Executing QUIT\")\n command = \"QUIT\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n self.close_socket(self.s) # Close socket since we're done.\n return msg_rec", "def ev_quit(self, event: tcod.event.Quit):\n raise SystemExit(0) # `SystemExit(0)` lukker programmet ned uden at give den normale fejl besked.", 
"def shutdown():\n\tglobal StoreWorkerThread, StoreWorkerThreadLock\n\n\tStoreWorkerThreadLock.acquire()\n\t\n\tif not running():\n\t\t# for convenience, this is not an error\n\t\tStoreWorkerThread = None\n\t\tStoreWorkerThreadLock.release()\n\t\treturn\n\t\t\n\t# send 'quit' command\n\tStoreCmdQueue.put(('quit',))\n\t\n\t# wait for thread to exit\n\tStoreWorkerThread.join()\n\tStoreWorkerThread = None\n\t\n\tStoreWorkerThreadLock.release()", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def quit(a):\r\n\t\t_trigger(\"on_quit\")", "def quit_program():\n\n exit_message = \"Closing the mailroom for the day...\"\n print(exit_message)\n sys.exit()", "def quit_(control):\n resp = ftplib_.sendquit(control)\n print(resp)\n sys.exit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Windows version of base.processKill
def processKill(uPid): return processTerminate(uPid);
[ "def kill_subprocess(self):\n try:\n self.process.kill()\n except OSError:\n pass\n return", "def cmd_process_kill(self, mysql_pid):\n raise NotImplementedError", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def killProcessInGuest(self, pid):\r\n return self.vmrun('killProcessInGuest', pid)", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])", "def kill_process(pid):\n\n ps.kill_process(pid)\n click.echo('Process %s has been terminated' % pid)", "def EndProcess(proc, code=0):\n win32process.TerminateProcess(proc, code)", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n\n self.proc.kill()", "def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)", "def kill_cmd(process, name: str):\n if process.poll() is None:\n process.kill()\n process.wait()\n logger.info(f\"Child process {name} has been killed\")", "def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass", "def killProcess(cls, pid):\n try:\n os.kill(pid, signal.SIGTERM)\n while True:\n time.sleep(1)\n os.kill(pid, 0)\n except OSError as e:\n if e.errno != errno.ESRCH:\n raise e", "def _terminate_process(p):\n try:\n p.terminate()\n except (OSError, IOError):\n pass", "def kill(self):\n if self.process is not None:\n LOGGER.info('Killing command...')\n self.process.kill()\n self.process = None", "def quit_rybka(process):\n try:\n process.kill()\n except OSError:\n print \"OSError: Cannot close Rybka.exe\"", "def command_kill(self):\n required_arguments = {\n 'kill': self.BINARY_PATHS['kill'],\n 'pid': self.screen_pid\n }\n\n self._previous_arguments = required_arguments\n return '%(kill)s %(pid)s' % required_arguments" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Windows version of base.processCheckPidAndName
def processCheckPidAndName(uPid, sName): fRc = processExists(uPid); if fRc is True: try: from win32com.client import GetObject; # pylint: disable=F0401 oWmi = GetObject('winmgmts:'); aoProcesses = oWmi.InstancesOf('Win32_Process'); for oProcess in aoProcesses: if long(oProcess.Properties_("ProcessId").Value) == uPid: sCurName = oProcess.Properties_("Name").Value; reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName)); sName = sName.lower(); sCurName = sCurName.lower(); if os.path.basename(sName) == sName: sCurName = os.path.basename(sCurName); if sCurName == sName \ or sCurName + '.exe' == sName \ or sCurName == sName + '.exe': fRc = True; break; except: reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName)); return fRc;
[ "def _get_process_name(pid):\n PROCESS_QUERY_INFORMATION = 0x0400\n PROCESS_VM_READ = 0x0010\n LIST_MODULES_ALL = 0x03\n MAX_PATH = 260\n\n kernel32 = ctypes.WinDLL(\"kernel32\", use_last_error=True)\n kernel32.OpenProcess.rettype = ctypes.wintypes.HANDLE\n kernel32.OpenProcess.argtypes = [\n ctypes.wintypes.DWORD,\n ctypes.wintypes.BOOL,\n ctypes.wintypes.DWORD\n ]\n\n kernel32 = ctypes.WinDLL(\"kernel32\", use_last_error=True)\n kernel32.CloseHandle.rettype = ctypes.wintypes.BOOL\n kernel32.CloseHandle.argtypes = [\n ctypes.wintypes.HANDLE\n ]\n\n psapi = ctypes.WinDLL(\"psapi\", use_last_error=True)\n psapi.EnumProcessModulesEx.retype = ctypes.wintypes.BOOL\n psapi.EnumProcessModulesEx.argtypes = [\n ctypes.wintypes.HANDLE,\n ctypes.POINTER(ctypes.wintypes.HMODULE),\n ctypes.wintypes.DWORD,\n ctypes.POINTER(ctypes.wintypes.DWORD),\n ctypes.wintypes.DWORD]\n\n psapi.GetModuleBaseNameW.retype = ctypes.wintypes.DWORD\n psapi.GetModuleBaseNameW.argtypes = [\n ctypes.wintypes.HANDLE,\n ctypes.wintypes.HMODULE,\n ctypes.wintypes.LPWSTR,\n ctypes.wintypes.DWORD\n ]\n\n proc = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, pid)\n if not proc:\n return\n\n module = ctypes.wintypes.HMODULE()\n needed = ctypes.wintypes.DWORD()\n result = psapi.EnumProcessModulesEx(proc,\n ctypes.byref(module),\n ctypes.sizeof(ctypes.wintypes.HMODULE),\n ctypes.byref(needed),\n LIST_MODULES_ALL)\n if not result:\n kernel32.CloseHandle(proc)\n return\n\n buffer_length = MAX_PATH\n buffer = ctypes.create_unicode_buffer(buffer_length)\n size = psapi.GetModuleBaseNameW(proc, module, buffer, buffer_length)\n\n while size > 0 and ctypes.get_last_error() == 122: # ERROR_INSUFFICIENT_BUFFER\n buffer_length += MAX_PATH\n buffer = ctypes.create_unicode_buffer(buffer_length)\n size = psapi.GetModuleBaseNameW(proc, module, buffer, buffer_length)\n\n kernel32.CloseHandle(module)\n kernel32.CloseHandle(proc)\n\n if not size:\n return\n\n name = buffer.value[:size]\n return name", "def validate_process(pid, name=None):\n try:\n with open(os.path.join('/proc', str(pid), 'comm')) as proc_comm:\n proc_name = next(proc_comm).strip()\n except (OSError, IOError):\n return False\n return name is None or name == proc_name", "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def _get_pid(self, name):\n pid = self.user32.FindWindowW(None, name)\n return None or pid", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def process_exists(self):\n cmdline = [path.basename(sys.executable),sys.argv[0],self.state['name'].lower()]\n for p in process_iter():\n if p.cmdline() == cmdline: return p.pid\n return False", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def 
make_process_name_useful():\n set_kernel_process_name(os.path.basename(sys.argv[0]))", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def test_find_processes_matches_cmdline(self, test_system_status):\n num_procs_found = len(test_system_status.system_status.find_processes_by_name(\n test_system_status.mocked_proc_name\n ))\n assert num_procs_found == test_system_status.num_mocked_procs", "def get_pid_from_processName(_processName=appName_to_processName.get('__test__')):\n import subprocess\n sub_proc = subprocess.run('tasklist', capture_output=True)\n sub_proc = sub_proc.stdout.decode('ASCII')\n table = [line.split() for line in sub_proc.split('\\r\\n')]\n if type(_processName) is str or int:\n '''There is only one process to process.'''\n _return = []\n for i in table:\n if str(_processName) in i:\n _return.append(i[1])\n else:\n '''There is a list of processes to process.'''\n pass", "def get_name(pid, default=None):\n try:\n return only(\n process.Properties_(\"Name\").Value\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')\n if process.Properties_(\"ProcessID\").Value == pid\n )\n except TooFewItemsError:\n return default", "def _get_process_id(self):\n for proc in psutil.process_iter():\n if self.exe in proc.name():\n return proc.pid\n raise Exception(f\"Cannot find executable with provided name: {self.exe}\")", "def isRunning(pid):", "def RunningOnWindows():\n return os.name == WINDOWS", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logs windows memory stats.
def logMemoryStats(): class MemoryStatusEx(ctypes.Structure): """ MEMORYSTATUSEX """ kaFields = [ ( 'dwLength', ctypes.c_ulong ), ( 'dwMemoryLoad', ctypes.c_ulong ), ( 'ullTotalPhys', ctypes.c_ulonglong ), ( 'ullAvailPhys', ctypes.c_ulonglong ), ( 'ullTotalPageFile', ctypes.c_ulonglong ), ( 'ullAvailPageFile', ctypes.c_ulonglong ), ( 'ullTotalVirtual', ctypes.c_ulonglong ), ( 'ullAvailVirtual', ctypes.c_ulonglong ), ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ), ]; _fields_ = kaFields; # pylint: disable=invalid-name def __init__(self): super(MemoryStatusEx, self).__init__(); self.dwLength = ctypes.sizeof(self); try: oStats = MemoryStatusEx(); ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats)); except: reporter.logXcpt(); return False; reporter.log('Memory statistics:'); for sField, _ in MemoryStatusEx.kaFields: reporter.log(' %32s: %s' % (sField, getattr(oStats, sField))); return True;
[ "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def _LogMemoryUsage(self, process_label):\n mem_info = process_label.process.GetMemoryInformation()\n logging.info((\n u'{0:s} - RSS: {1:d}, VMS: {2:d}, Shared: {3:d}, Text: {4:d}, lib: '\n u'{5:d}, data: {6:d}, dirty: {7:d}, Memory Percent: {8:0.2f}%').format(\n process_label.label, mem_info.rss, mem_info.vms, mem_info.shared,\n mem_info.text, mem_info.lib, mem_info.data, mem_info.dirty,\n mem_info.percent * 100))", "def write_mem_status(self):\n memoryUsed = (\n psutil.Process(self.pid).memory_info()[0] / 2.0 ** 30\n ) # current app memory use in GB\n self.writer.add_scalars(\n \"device/mem\",\n {\"memory_used (GB)\": memoryUsed},\n self.count,\n )\n self.memoryUsed.append(memoryUsed)", "def show_process_memory( cls, call_msg = \"\", log_level = None, print_it = False ):\n process = psutil.Process(os.getpid()) # import psutil\n mem = process.memory_info().rss\n # convert to mega and format\n mem_mega = mem/( 1e6 )\n msg = f\"{call_msg}process memory = {mem_mega:10,.2f} mega bytes \"\n if print_it:\n print( msg )\n if not ( log_level is None ):\n cls.__logger.log( log_level, msg )\n msg = f\"{mem_mega:10,.2f} mega bytes \"\n return ( mem, msg )", "def get_mem_stats(self):\n print(\"### get memory statistics starts ###\")\n mem_stat = dict()\n count = -1\n output = getattr(self.warp17_obj, 'shell')(command=\"show memory statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n out = [i.rstrip().strip() for i in out]\n\n for index, line in enumerate(out):\n count += 1\n if len(line) > 0:\n if re.search(r\"MBUF\\s+RX:$\", line) is not None:\n mem_stat['mbuf_rx_alloc'] = re.search(r\"^Total\\s+Allocated:\\s+(\\d+)\", out[index+1]).group(1)\n mem_stat['mbuf_rx_free'] = re.search(r\"^Total\\s+Free\\s+:\\s+(\\d+)\", out[index+2]).group(1)\n\n if re.search(r\"MBUF\\s+TX\\s+HDR:$\", line) is not None:\n mem_stat['mbuf_tx_alloc'] = re.search(r\"^Total\\s+Allocated:\\s+(\\d+)\", out[index+1]).group(1)\n mem_stat['mbuf_tx_free'] = re.search(r\"^Total\\s+Free\\s+:\\s+(\\d+)\", out[index+2]).group(1)\n\n if re.search(r\"MBUF\\s+CLONE:$\", line) is not None:\n match = re.search(r\"^Total\\s+Allocated:\\s+(\\d+)\", out[index+1])\n mbuf_clone_alloc = match.group(1)\n match = re.search(r\"^Total\\s+Free\\s+:\\s+(\\d+)\", out[index+2])\n mbuf_clone_free = match.group(1)\n mem_stat['mbuf_clone_alloc'] = mbuf_clone_alloc\n mem_stat['mbuf_clone_free'] = mbuf_clone_free\n\n if re.search(r\"TCB:$\", line) is not None:\n match = re.search(r\"^Total\\s+Allocated:\\s+(\\d+)\", out[index+1])\n mbuf_tcb_alloc = match.group(1)\n match = re.search(r\"^Total\\s+Free\\s+:\\s+(\\d+)\", 
out[index+2])\n mbuf_tcb_free = match.group(1)\n mem_stat['mbuf_tcb_alloc'] = mbuf_tcb_alloc\n mem_stat['mbuf_tcb_free'] = mbuf_tcb_free\n\n if re.search(r\"UCB:$\", line) is not None:\n match = re.search(r\"^Total\\s+Allocated:\\s+(\\d+)\", out[index+1])\n mbuf_ucb_alloc = match.group(1)\n match = re.search(r\"^Total\\s+Free\\s+:\\s+(\\d+)\", out[index+2])\n mbuf_ucb_free = match.group(1)\n mem_stat['mbuf_ucb_alloc'] = mbuf_ucb_alloc\n mem_stat['mbuf_ucb_free'] = mbuf_ucb_free\n\n print(json.dumps(mem_stat, indent=4))\n return mem_stat", "def print_memory_diags(disable_print=False):\n process = psutil.Process(os.getpid())\n memory = process.memory_info().rss/1000000000.0\n if not disable_print:\n logging.info('\\tMemory usage: {:.3f} GB'.format(memory))\n return memory", "def print_mem_utilization():\n while True:\n memory_util = psutil.virtual_memory().percent\n print(\"Memory Utilization: {0}\".format(memory_util))\n memory_utilization.append(memory_util)\n time.sleep(5)", "def track(self, header=None):\n pynvml.nvmlInit()\n handle = pynvml.nvmlDeviceGetHandleByIndex(self.device)\n meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\n self.curr_line = self.frame.f_lineno\n where_str = self.module_name + ' ' + self.func_name + ':' + ' line ' + str(self.curr_line)\n\n with open(self.gpu_profile_fn, 'a+') as f:\n if self.begin:\n f.write('GPU Memory Tracker\\n')\n self.begin = False\n if header is not None:\n f.write(\"\\n{}\\n\".format(header))\n\n time = datetime.datetime.now().strftime(\"%y%m%d%H%M%S\")\n f.write(\"{} | @ {:<50} | Total Used Memory:{:>7.1f}MB\\n\".format(time, where_str, meminfo.used / 1000 ** 2))\n\n if self.print_detail is True:\n info_list = self.get_tensor_info([t for t in self.get_tensors()])\n info_tuple_list = [info for info in zip(*info_list)]\n new_summary = {info + (info_tuple_list.count(info), ) for info in info_tuple_list}\n if len(new_summary - self.last_summary) != 0:\n increment = sorted(list(new_summary - self.last_summary), key=itemgetter(2), reverse=True)\n for t, s, b, c in increment:\n if b > self.memory_min:\n f.write(\n '+ | {:>2} * Size:{:<20} | Memory: {:.3f}MB | {:<20}\\n'.format(c, s, b * c, t))\n decrement = sorted(list(new_summary - self.last_summary), key=itemgetter(2), reverse=False)\n for t, s, b, c in decrement:\n if b > self.memory_min:\n f.write(\n '- | {:>2} * Size:{:<20} | Memory: {:.3f}MB | {:<20}\\n'.format(c, s, b * c, t))\n else:\n f.write('No change in tensor shapes, sizes and types\\n')\n self.last_summary = new_summary\n\n pynvml.nvmlShutdown()", "def log_connection_mem_stats(self) -> None:\n super(AbstractGatewayBlockchainConnection, self).log_connection_mem_stats()\n t0 = time.time()\n class_name = self.__class__.__name__\n if self.node.message_converter is not None:\n hooks.add_obj_mem_stats(\n class_name,\n self.network_num,\n self.node.message_converter,\n \"message_converter\",\n memory_utils.ObjectSize(\n \"message_converter\", memory_utils.get_special_size(self.node.message_converter).size,\n is_actual_size=True\n ),\n object_item_count=1,\n object_type=memory_utils.ObjectType.META,\n size_type=memory_utils.SizeType.SPECIAL\n )\n t_m_c_diff = round(time.time() - t0, 3)\n logger.debug(\"Recording {} message_converter MemoryStats took: {} seconds\", class_name, t_m_c_diff)", "def memory_monitor(queue: Queue, outfile_prefix: str) -> None:\n tracemalloc.start()\n old_max = 0\n snapshot = None\n wait_time = 0.1\n\n fout = open(\"{}_MAX_TRACKER\".format(outfile_prefix), \"w\")\n b2mb = 1 / 1048576\n if sys.platform == 
\"linux\":\n # linux outputs max_rss in KB not B\n b2mb = 1 / 1024\n\n while True:\n if queue.empty():\n time.sleep(wait_time)\n max_rss = getrusage(RUSAGE_SELF).ru_maxrss\n if max_rss > old_max:\n snapshot = tracemalloc.take_snapshot()\n line = \"{} max RSS {:.2f} MiB\".format(datetime.now(), max_rss * b2mb)\n print(line, file=fout)\n old_max = max_rss\n else:\n snapshot.dump(\"{}_SNAPSHOT\".format(outfile_prefix))\n fout.close()\n tracemalloc.stop()\n return", "def get_memory_stats():\n with open('/proc/meminfo') as f:\n memfile = list(f)\n if len(memfile) <= 0:\n return \"Unable to get memory info\"\n memtotal = float((memfile[0].split())[1])\n memfree = float((memfile[1].split())[1])\n memused = memtotal - memfree\n membuffers = float((memfile[3].split())[1])\n memcache = float((memfile[4].split())[1])\n return {'memtotal':memtotal,'memfree':memfree,'memused':memused,'membuffers':membuffers,'memcache':memcache}", "def print_statistics(self):\n\n print(\"Replay-memory statistics:\")\n\n # Print statistics for the Q-values before they were updated\n # in update_all_q_values().\n msg = \"\\tQ-values Before, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}\"\n print(msg.format(np.min(self.q_values_old),\n np.mean(self.q_values_old),\n np.max(self.q_values_old)))\n\n # Print statistics for the Q-values after they were updated\n # in update_all_q_values().\n msg = \"\\tQ-values After, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}\"\n print(msg.format(np.min(self.q_values),\n np.mean(self.q_values),\n np.max(self.q_values)))\n\n # Print statistics for the difference in Q-values before and\n # after the update in update_all_q_values().\n q_dif = self.q_values - self.q_values_old\n msg = \"\\tQ-values Diff., Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}\"\n print(msg.format(np.min(q_dif),\n np.mean(q_dif),\n np.max(q_dif)))\n\n # Print statistics for the number of large estimation errors.\n # Don't use the estimation error for the last state in the memory,\n # because its Q-values have not been updated.\n err = self.estimation_errors[:-1]\n err_count = np.count_nonzero(err > self.error_threshold)\n msg = \"\\tNumber of large errors > {0}: {1} / {2} ({3:.1%})\"\n print(msg.format(self.error_threshold, err_count,\n self.num_used, err_count / self.num_used))\n\n # How much of the replay-memory is used by states with end_life.\n end_life_pct = np.count_nonzero(self.end_life) / self.num_used\n\n # How much of the replay-memory is used by states with end_episode.\n end_episode_pct = np.count_nonzero(self.end_episode) / self.num_used\n\n # How much of the replay-memory is used by states with non-zero reward.\n reward_nonzero_pct = np.count_nonzero(self.rewards) / self.num_used\n\n # Print those statistics.\n msg = \"\\tend_life: {0:.1%}, end_episode: {1:.1%}, reward non-zero: {2:.1%}\"\n print(msg.format(end_life_pct, end_episode_pct, reward_nonzero_pct))", "def memory_usage(where):\n mem_summary = summary.summarize(muppy.get_objects())\n print(\"Memory summary:\", where)\n summary.print_(mem_summary, limit=2)\n print(\"VM: %.2fMb\" % (get_virtual_memory_usage_kb() / 1024.0))", "def reset_memory_stats(device):\n context.context().reset_memory_stats(device)", "def record_memory_map(self):\n memory_map = self.get_memory_map()\n self._memory_map_records.append(memory_map)", "def show_mem_usage():\n gl = sys._getframe(1).f_globals\n vars = {}\n for k, v in list(gl.items()):\n # for pandas dataframes\n if hasattr(v, 'memory_usage'):\n mem = v.memory_usage(deep=True)\n if not np.isscalar(mem):\n mem = mem.sum()\n 
vars.setdefault(id(v), [mem]).append(k)\n # work around for a bug\n elif isinstance(v, pd.Panel):\n v = v.values\n vars.setdefault(id(v), [sys.getsizeof(v)]).append(k)\n total = 0\n for k, (value, *names) in vars.items():\n if value > 1e6:\n print(names, \"%.3fMB\" % (value / 1e6))\n total += value\n print(\"%.3fMB\" % (total / 1e6))", "def get_memory_info():\n return psutil.virtual_memory()", "def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls HeapValidate(GetProcessHeap(), 0, NULL);
def checkProcessHeap(): # Get the process heap. try: hHeap = ctypes.windll.kernel32.GetProcessHeap(); except: reporter.logXcpt(); return False; # Check it. try: fIsOkay = ctypes.windll.kernel32.HeapValidate(hHeap, 0, None); except: reporter.logXcpt(); return False; if fIsOkay == 0: reporter.log('HeapValidate failed!'); # Try trigger a dump using c:\utils\procdump64.exe. from common import utils; iPid = os.getpid(); asArgs = [ 'e:\\utils\\procdump64.exe', '-ma', '%s' % (iPid,), 'c:\\CrashDumps\\python.exe-%u-heap.dmp' % (iPid,)]; if utils.getHostArch() != 'amd64': asArgs[0] = 'c:\\utils\\procdump.exe' reporter.log('Trying to dump this process using: %s' % (asArgs,)); utils.processCall(asArgs); # Generate a crash exception. ctypes.windll.msvcrt.strcpy(None, None, 1024); return True;
[ "def _mem_heap(self):\n return False", "def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)\n\n cmd = \"deref $_heap(0x10+0x10)\"\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)", "def isRestrictToExecuteMemory(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def test_he_vm_memory(self):\n assert helpers.check_he_vm_memory_via_engine(\n expected_value=conf.EXPECTED_MEMORY\n )\n assert helpers.check_he_vm_memory_via_guest_os(\n expected_value=conf.EXPECTED_MEMORY\n )", "def test_validate_factorial_heap_pq(self):\n from ch04.factorial_heap import PQ, validate\n\n end = 10000\n pq = PQ(end)\n for i in range(end):\n pq.enqueue(i, i)\n validate(pq)\n\n last = end-1\n while pq:\n self.assertEqual(last, pq.dequeue())\n last -= 1\n validate(pq)", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def validate_type(cls, heap_type: int):\n if heap_type in [cls.MAX, cls.MIN]:\n return True\n else:\n raise InvalidHeapTypeError([{\"type\": \"MIN\", \"value\": cls.MIN}, {\"type\": \"MAX\", \"value\": cls.MAX}])", "def scan_memory( ):\n print \"START SCANNING\"\n readlimit = 100*4096\n \n skip = (\"svchost.exe\", \"iexplore.exe\", \"explorer.exe\", \"System\",\n \"smss.exe\", \"csrss.exe\", \"winlogon.exe\", \"lsass.exe\",\n \"spoolsv.exe\", \"alg.exe\", \"wuauclt.exe\", \"wininit.exe\",\n \"services.exe\", \"lsm.exe\", \"audiodg.exe\", \"dllhost.exe\",\n \"conhost.exe\", \"igfxsrvc.exe\", \"SearchFilterHost.exe\",\n \"SearchFilterHost.exe\", \"wmpnetwk.exe\", \"SearchIndexer.exe\",\n \"SearchProtocolHost.exe\", \"WUDFHost.exe\", \"dwm.exe\", \"LogonUI.exe\")\n \n hSnap = windll.kernel32.CreateToolhelp32Snapshot( TH32CS_SNAPPROCESS, 0 )\n \n pe32 = PROCESSENTRY32()\n pe32.dwSize = sizeof( PROCESSENTRY32 )\n\n ## The first pid i == 0 (System)\n windll.kernel32.Process32First( hSnap, byref( pe32 ) )\n\n ownpid= windll.kernel32.GetCurrentProcessId()\n print \"[+]PID current process: \" + str( ownpid )\n\n print \"[+]Scanning processes\"\n while True:\n if windll.kernel32.Process32Next( hSnap, byref( pe32 ) ) == 0:\n break\n \n name = pe32.szExeFile\n pid = pe32.th32ProcessID\n \n if not name in skip and pid != ownpid:\n print \"\\t[+]Name: \" + str( name ) + \" | PID: \" + str( pid )\n ## Open the process\n hProcess = windll.kernel32.OpenProcess( PROCESS_VM_READ | PROCESS_VM_OPERATION | PROCESS_QUERY_INFORMATION, 0, pid )\n \n addr = c_long(0)\n\n while True:\n MBI = MEMORY_BASIC_INFORMATION()\n windll.kernel32.VirtualQueryEx( hProcess, addr.value, byref( MBI ), sizeof( MBI ) )\n \n ## If the VirtualQueryEx call returns nothing, the max address has been reached, break\n ## If the program is run in 32bit mode and scans a 64bit process it cant read some addresses so check AllocationBase if the address is readable. 
\n if ( addr.value != 0 and MBI.BaseAddress == None ) or (MBI.AllocationBase == None and not Processis64( hProcess ) ):\n break\n \n ## The new addr that will be scanned \n addr.value += MBI.RegionSize\n \n if MBI.Type == MEM_PRIVATE and MBI.State == MEM_COMMIT and MBI.Protect in ( PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_READWRITE ):\n #print \"\\t\\tFound good region: \" + str( MBI.BaseAddress )\n ReadAddr = 0\n while MBI.RegionSize > 0:\n \n if ReadAddr != 0:\n ReadAddr += readlimit\n\n else:\n ReadAddr = MBI.BaseAddress\n\n if MBI.RegionSize > readlimit:\n BuffSize = readlimit\n MBI.RegionSize -= readlimit\n\n else:\n BuffSize = MBI.RegionSize\n MBI.RegionSize = 0\n\n Buff = create_string_buffer( BuffSize )\n windll.kernel32.ReadProcessMemory( hProcess, ReadAddr, Buff, BuffSize, 0 )\n\n\n found = check_buffer( Buff.raw )\n if found != \"Not Found\":\n print \"\\t\\t[!]Found at address: \" + str( ReadAddr + found )\n\n windll.kernel32.CloseHandle( hProcess )\n\n windll.kernel32.CloseHandle( hSnap )", "def checkLiveMigrateMemory(self):\n overhead_kb = 0\n if arch.type == \"x86\":\n # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than \n # the minimum that Xen would allocate if no value were given.\n overhead_kb = self.info['VCPUs_max'] * 1024 + \\\n (self.info['memory_static_max'] / 1024 / 1024) * 4\n overhead_kb = ((overhead_kb + 1023) / 1024) * 1024\n # The domain might already have some shadow memory\n overhead_kb -= xc.shadow_mem_control(self.domid) * 1024\n if overhead_kb > 0:\n balloon.free(overhead_kb, self)", "def test_static_is_heap(self):\n good = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n bad = [1,2,3,114,5,6,7,8,9,10]\n\n self.assertTrue(Heap.is_heap(good), 'should hold the heap property')\n self.assertFalse(Heap.is_heap(bad), 'should not hold the heap property')", "def _checkHandleLeak(self, func, *args, **kwargs):\n # Grab the current process\n proc = psutil.Process()\n\n func(*args, **kwargs)\n middle = proc.num_handles()\n\n func(*args, **kwargs)\n end = proc.num_handles()\n\n self.assertEquals(end - middle, 0)", "def CalcNewErrorMeasures(self):\n for p in self.Active[:self.N_Active]:\n if self.Errors[p] < 0.0:\n #print self.CalcErrorMeasure(p), p\n self.Errors[p] = self.CalcErrorMeasure(p)\n # Add new values to the heap\n self.Active[:self.heap_length+1],dummy= maxheap.heap_insert(self.Errors[:self.N_Idx], \n p, self.Active[:self.heap_length+1],\n self.heap_length)\n self.heap_length +=1\n \n if self.heap_length != self.N_Active:\n raise ValueError", "def hxlvalidate():\n run_script(hxlvalidate_main)", "def test_heap_instaniated_with_bool():\n from binary_heap import BinaryHeap\n with pytest.raises(TypeError):\n BinaryHeap(False)", "def validate(self):\n\t\tif not self.process.is_alive():\n\t\t\t# Ugh!\n\t\t\tprint_t(\"Process for '%s' died!\" % self.frontend)\n\t\t\tself.process.join()\n\t\t\tprint_t(\"Attempting restart of '%s'!\" % self.frontend)\n\t\t\tself.start()", "def CleanUp(self):\n if self.process != 0 and self.mem_address != 0:\n # free up the memory we allocated\n #win32api.SetLastError(0)\n self.CheckGuardSignature()\n\n ret = win32functions.VirtualFreeEx(\n c_void_p(self.process),\n c_void_p(self.mem_address),\n win32structures.ULONG_PTR(0),\n wintypes.DWORD(win32defines.MEM_RELEASE))\n if ret == 0:\n print('Error: CleanUp: VirtualFreeEx() returned zero for address ', hex(self.mem_address))\n last_error = win32api.GetLastError()\n print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())\n sys.stdout.flush()\n 
self._CloseHandle()\n raise WinError()\n self.mem_address = 0\n self._CloseHandle()\n else:\n pass # ActionLogger().log('\\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')", "def minHeap(self):\n for pos in range(self.size // 2, 0, -1):\n self.minHeapify(pos)", "def test_monitor_process_traps_nosuchprocess(self, test_system_status):\n test_system_status.mocked_procs[0].memory_info.side_effect = psutil.NoSuchProcess('')\n test_system_status.system_status.monitor_processes()", "def bad_cgroup_processes_check():\n return CGCheck([], bad_cgroup_processes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run.
def run (self, scalers = {'capital costs':1.0}): self.was_run = True self.reason = "OK" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'transmission': self.was_run = False self.reason = "Not a transmission project." return if not self.cd["model electricity"]: self.was_run = False self.reason = "Electricity must be modeled to analyze "+\ "transmission. It was not for this community." return if np.isnan(float(self.comp_specs['distance to community'])): self.was_run = False self.reason = ("There are no communities within 30 miles with" " lower cost of electricity.") return self.calc_average_load() try: self.get_intertie_values() except ValueError: self.was_run = False self.reason = ("Could not find data on community to intertie to.") return self.calc_pre_intertie_generation() self.calc_intertie_offset_generation() if self.cd["model heating fuel"]: # change these below self.calc_lost_heat_recovery() # see NOTE* #~ return if self.cd["model financial"]: # AnnualSavings functions (don't need to write) self.get_diesel_prices() # change these below self.calc_capital_costs() self.calc_annual_electric_savings() self.calc_annual_heating_savings() # AnnualSavings functions (don't need to write) self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd["current year"]) #~ print self.benefit_cost_ratio self.calc_levelized_costs(self.proposed_generation_cost)
[ "def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'residential':\n self.was_run = False\n self.reason = \"Not a residential project.\"\n return\n\n # needed for electric or HF component and has a default value\n self.calc_avg_consumption()\n if self.cd[\"model electricity\"]:\n\n self.calc_baseline_kWh_consumption()\n self.calc_proposed_kWh_consumption()\n\n if self.cd[\"model heating fuel\"]:\n #~ self.calc_init_HH()\n self.calc_savings_opportunities()\n self.calc_init_consumption()\n self.calc_baseline_fuel_consumption()\n self.calc_proposed_fuel_consumption()\n #~ self.set_forecast_columns()\n\n if self.cd[\"model financial\"]:\n self.calc_capital_costs()\n\n self.get_diesel_prices()\n self.calc_baseline_fuel_cost()\n self.calc_proposed_fuel_cost()\n self.calc_baseline_kWh_cost()\n self.calc_proposed_kWh_cost()\n\n\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n self.calc_annual_total_savings()\n\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n\n self.calc_npv(self.cd['discount rate'], self.cd['current year'])\n self.calc_levelized_costs(0)", "def run (self, scalers = {'capital costs':1.0}):\n\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'wind':\n self.was_run = False\n self.reason = \"Not a Wind project\"\n return\n\n try:\n #~ self.generation = self.forecast.get_generation(self.start_year)\n self.calc_average_load()\n self.calc_generation_wind_proposed()\n except AttributeError:\n self.diagnostics.add_warning(self.component_name,\n \"could not be run\")\n self.was_run = False\n self.reason = (\"Could not Calculate average load or \"\n \"proposed generation\")\n return\n\n\n\n\n #~ #~ print self.comp_specs['wind class']\n # ??? 
some kind of failure message\n if self.average_load is None or \\\n (self.average_load > self.comp_specs['average load limit'] and \\\n self.load_offset_proposed > 0):\n #~ float(self.comp_specs['wind class']) > \\\n #~ self.comp_specs['minimum wind class'] and \\\n\n # if the average load is greater that the lower limit run this component\n # else skip\n\n self.calc_transmission_losses()\n self.calc_excess_energy()\n self.calc_net_generation_wind()\n self.calc_electric_diesel_reduction()\n self.calc_diesel_equiv_captured()\n self.calc_loss_heat_recovery()\n self.calc_reduction_diesel_used()\n\n\n if self.cd[\"model financial\"]:\n # AnnualSavings functions (don't need to write)\n self.get_diesel_prices()\n\n # change these below\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n # AnnualSavings functions (don't need to write)\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n #~ print self.benefit_cost_ratio\n self.calc_levelized_costs(self.maintenance_cost)\n else:\n #~ print \"wind project not feasible\"\n self.was_run = False\n if self.load_offset_proposed <= 0:\n self.reason = \"Proposed load offset less than 0\"\n else:\n self.reason = \\\n \"Average load too small for viable wind generation.\"\n self.diagnostics.add_note(self.component_name,\n \"communities average load is not large enough to consider project\")\n #~ print self.benefit_cost_ratio", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def run(self):\n self.investment_loop()", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def run_module(self):\n try:\n self.calculate_costs(self.input_dict, self.output_dict)\n self.outputs_for_detailed_tab(self.input_dict, self.output_dict)\n # self.outputs_for_module_type_operation(self.input_dict, self.output_dict)\n self.output_dict['substation_module_type_operation'] = self.outputs_for_costs_by_module_type_operation(\n input_df=self.output_dict['substation_cost_output_df'],\n project_id=self.project_name,\n total_or_turbine=True\n )\n return 0, 0\n except Exception as error:\n traceback.print_exc()\n print(f\"Fail {self.project_name} SubstationCost\")\n return 1, error", "def 
calc_savings_opportunities (self):\n rd = self.comp_specs['data']\n ## #HH\n self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"]\n self.opportunity_HH = np.float64( self.opportunity_HH )\n #~ print self.opportunity_HH\n if self.opportunity_HH < 0:\n self.opportunity_HH = 0\n self.diagnostics.add_note(self.component_name,\n \"calculate Houses to retrofit was negative, setting to 0\" )\n\n ## % as decimal\n #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"]\n #~ self.percent_savings = np.float64( self.percent_savings)\n area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])\n EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])\n avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"])\n\n total = area * EUI\n\n\n # the one in each of these function calls is an identity\n amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0\n self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_HF)\n\n amnt = np.float64(rd[\"Wood\"]) / 100.0\n self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_cords)\n\n amnt = np.float64(rd[\"Utility Gas\"]) / 100.0\n self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_Mcf)\n\n amnt = np.float64(rd[\"LP\"]) / 100.0\n self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_LP)\n\n amnt = np.float64(rd[\"Electricity\"]) / 100.0\n self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_kWh)\n #~ self.savings_coal\n #~ self.savings_solar\n #~ self.savings_other\n\n self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\\n self.savings_wood * (1/constants.mmbtu_to_cords) +\\\n self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\\n self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\\n self.savings_LP* (1/constants.mmbtu_to_gal_LP)", "def calc_baseline_fuel_consumption (self):\n rd = self.comp_specs['data']\n self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0\n HH = self.households\n #~ print HH\n area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])\n EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])\n\n scaler = (HH - self.init_HH) * area * EUI\n\n self.baseline_fuel_Hoil_consumption = \\\n self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\\n scaler * constants.mmbtu_to_gal_HF\n self.baseline_fuel_wood_consumption = \\\n self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\\n scaler * constants.mmbtu_to_cords\n self.baseline_fuel_gas_consumption = self.init_gas + \\\n np.float64(rd[\"Utility Gas\"]/100.0) * \\\n scaler * constants.mmbtu_to_Mcf\n self.baseline_fuel_LP_consumption = \\\n self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\\n scaler * constants.mmbtu_to_gal_LP\n self.baseline_fuel_kWh_consumption = self.init_kWh+\\\n np.float64(rd[\"Electricity\"]/100.0)*\\\n scaler * constants.mmbtu_to_kWh\n #~ self.baseline_fuel_coal_consumption\n #~ self.baseline_fuel_solar_consumption\n #~ self.baseline_fuel_other_consumption\n if self.cd['natural gas price'] == 0:\n self.baseline_fuel_gas_consumption = 0\n\n self.baseline_HF_consumption = \\\n self.baseline_fuel_Hoil_consumption * \\\n (1/constants.mmbtu_to_gal_HF) +\\\n self.baseline_fuel_wood_consumption * \\\n (1/constants.mmbtu_to_cords) +\\\n 
self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\\n self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\\n self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP)", "def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. 
+ s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol", "def back_calc_workflow():\n\n # site_csv = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\regional_properties.csv\"\n # run_baseline(site_csv)\n # combine_summary_files(site_csv)\n # match_csv = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Data\\Kenya\\From_Sharon\\Processed_by_Ginger\\regional_PDM_summary.csv\"\n # back_calc_mgmt(match_csv)\n # move_input_files()\n # back_calc_forward(match_csv, 'GLPC')\n # summarize_sch_wrapper(match_csv)\n # save_as = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Forage_model\\model_results\\regional_properties\\forward_from_2014\\back_calc_match_summary_2015.csv\"\n # summarize_match(match_csv, save_as)\n # run_preset_densities()\n # summarize_offtake()\n summarize_remaining_biomass()", "def investment_calc(self, investment):\n \n \n #muni basis each year is the muni_amount + all prior year's interest\n self.muni_appreciated = round(investment[0] * (1+self.muni_roi),2)\n self.equity_appreciated = round(investment[1] * (1+self.equity_roi),2)\n \n #Interest and Dividends, pretax\n self.pretax_interest = round(investment[0]*self.muni_int,2)\n self.pretax_dividends = round(investment[1]*self.equity_div,2)\n \n muni_percent = self.pretax_interest/(self.pretax_interest+self.pretax_dividends)\n \n #Corp Tax Schedule\n if self.pretax_dividends+self.pretax_interest <= 50000:\n self.muni_int_earned = round(self.pretax_interest*(1-15/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-15/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 50000) & (self.pretax_dividends+self.pretax_interest <= 75000):\n #Will split the amount proportionally.\n self.muni_int_earned = round((self.pretax_interest-(7500*muni_percent))*(1-25/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(7500*(1-muni_percent)))*(1-25/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 75000) & (self.pretax_dividends+self.pretax_interest <= 100000):\n self.muni_int_earned = round((self.pretax_interest-(13750*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(13750*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 100000) & (self.pretax_dividends+self.pretax_interest <= 335000):\n self.muni_int_earned = 
round((self.pretax_interest-(22250*muni_percent))*(1-39/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(22250*(1-muni_percent)))*(1-39/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 335000) & (self.pretax_dividends+self.pretax_interest <= 10000000):\n self.muni_int_earned = round((self.pretax_interest-(113900*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(113900*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 10000000) & (self.pretax_dividends+self.pretax_interest <= 15000000):\n self.muni_int_earned = round((self.pretax_interest-(3400000*muni_percent))*(1-35/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(3400000*(1-muni_percent)))*(1-35/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 15000000) & (self.pretax_dividends+self.pretax_interest <= 18333333):\n self.muni_int_earned = round((self.pretax_interest-(5150000*muni_percent))*(1-38/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(5150000*(1-muni_percent)))*(1-38/100),2)\n \n else:\n self.muni_int_earned = round(self.pretax_interest*(1-35/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-35/100),2)\n return (self.muni_appreciated, self.muni_int_earned, self.equity_appreciated, self.equity_div_earned)", "def calc_cash_flow(self):\n\n if any(s in self.tariff.solar_rate_name for s in ['self_con', 'Self_Con', 'sc', 'SC']):\n # IFF solar tariff paid to secondary solar retailer for self-consumed generation\n # and export FiT paid for exported generation\n # NB cost of exported self generation is received from retailer and passed to PV seller, so zero net effect\n # Energy flows treated as if PV is owned by customer\n self.local_solar_bill = (np.multiply(self.local_consumption, self.tariff.solar_import_tariff) + \\\n np.multiply(self.exports, self.tariff.export_tariff)).sum()\n else:\n self.local_solar_bill = 0.0\n\n if self.tariff.is_dynamic:\n # ------------------------------------\n # calculate tariffs and costs stepwise\n # ------------------------------------\n for step in np.arange(0, self.ts.get_num_steps()):\n # print(step)\n # --------------------------------------------------------------\n # Solar Block Daily Tariff : Calculate energy used at solar rate\n # --------------------------------------------------------------\n # Fixed daily allocation (set as % of annual generation) charged at solar rate,\n # residual is at underlying, e.g. 
TOU\n if 'Solar_Block_Daily' in self.tariff.tariff_type:\n print('Solar_Block_Daily NOT SUPPORTED')\n sys.exit('Solar_Block_Daily NOT SUPPORTED')\n # SOLAR BLOCK DAILY REMOVED\n # steps_today = ts.steps_today(step)\n # # Cumulative Energy for this day:\n # cumulative_energy = self.imports[steps_today].sum()\n # if len(steps_today) <= 1:\n # previous_energy = 0\n # else:\n # previous_energy = self.imports[steps_today[:-1]].sum()\n # # Allocate local solar allocation depending on cumulative energy relative to quota:\n # if cumulative_energy <= self.daily_local_quota:\n # self.solar_allocation[step] = self.imports[step]\n # elif previous_energy < self.daily_local_quota \\\n # and cumulative_energy > self.daily_local_quota:\n # self.solar_allocation[step] = self.daily_local_quota - previous_energy\n # else:\n # self.solar_allocation[step] = 0\n else:\n # ---------------------------------------------------------\n # For Block Tariffs, calc volumetric charges for each block\n # ---------------------------------------------------------\n # Block Quarterly Tariff\n # ----------------------\n if self.tariff.tariff_type == 'Block_Quarterly':\n steps_since_reset = np.mod((step - self.tariff.block_billing_start),\n self.tariff.steps_in_block) # to include step0\n cumulative_energy = self.imports[\n step - steps_since_reset:step + 1].sum() # NB only adds to step\n if steps_since_reset == 0:\n previous_energy = 0\n else:\n previous_energy = self.imports[step - steps_since_reset:step].sum() # NB adds to step-1\n\n # Block Daily Tariff\n # -------------------\n elif self.tariff.tariff_type == 'Block_Daily':\n steps_today = self.ts.steps_today(step)\n cumulative_energy = self.imports[steps_today].sum()\n if len(steps_today) <= 1:\n previous_energy = 0\n else:\n previous_energy = self.imports[steps_today[:-1]].sum()\n\n if cumulative_energy - previous_energy - self.imports[step] > 0.01:\n print('accumulation error')\n # All Block Tariffs:\n # -----------------\n if cumulative_energy <= self.tariff.high_1:\n self.import_charge[step] = self.imports[step] * self.tariff.block_rate_1\n elif previous_energy < self.tariff.high_1 and cumulative_energy <= self.tariff.high_2:\n self.import_charge[step] = (self.tariff.high_1 - previous_energy) * self.tariff.block_rate_1 + \\\n (cumulative_energy - self.tariff.high_1) * self.tariff.block_rate_2\n elif previous_energy > self.tariff.high_1 and cumulative_energy <= self.tariff.high_2:\n self.import_charge[step] = self.imports[step] * self.tariff.block_rate_2\n elif previous_energy < self.tariff.high_2 and cumulative_energy > self.tariff.high_2:\n self.import_charge[step] = (self.tariff.high_2 - previous_energy) * self.tariff.block_rate_2 + \\\n (cumulative_energy - self.tariff.high_2) * self.tariff.block_rate_3\n elif previous_energy >= self.tariff.high_2:\n self.import_charge[step] = self.imports[step] * self.tariff.block_rate_3\n elif previous_energy < self.tariff.high_1 and cumulative_energy > self.tariff.high_2:\n self.import_charge[step] = (self.tariff.high_1 - previous_energy) * self.tariff.block_rate_1 + \\\n (\n self.tariff.high_2 - self.tariff.high_1) * self.tariff.block_rate_2 + \\\n (cumulative_energy - self.tariff.high_2) * self.tariff.block_rate_3\n\n # -------------------------------------------------------------\n # calculate costs using array for static and underlying tariffs\n # -------------------------------------------------------------\n if self.tariff.tariff_type == 'Solar_Block_Daily' or not self.tariff.is_dynamic:\n self.import_charge = 
np.multiply((self.imports - self.solar_allocation), self.tariff.import_tariff)\n # For all dynamic and static tariffs:\n # -----------------------------------\n self.cash_flows = self.import_charge \\\n + np.multiply(self.solar_allocation, self.tariff.solar_import_tariff) \\\n - np.multiply(self.exports, self.tariff.export_tariff)\n # - np.multiply(self.local_exports, self.tariff.local_export_tariff) could be added for LET / P2P\n # (These are all 1x17520 Arrays.)\n\n self.energy_bill = self.cash_flows.sum() + self.tariff.fixed_charge * self.ts.get_num_days() + self.demand_charge\n\n if self.name == 'retailer':\n self.total_payment = self.energy_bill\n else:\n # capex, opex in $, energy in c (because tariffs in c/kWh)\n self.total_payment = self.energy_bill + \\\n self.local_solar_bill + \\\n (self.pv_capex_repayment +\n self.en_capex_repayment +\n self.en_opex +\n self.bat_capex_repayment) * 100\n \n\n # --------\n # Calc NPV\n # --------\n self.npv = -sum(self.total_payment / (1 + self.scenario.a_rate / 12) ** t\n for t in np.arange(1, 12 * self.scenario.a_term))", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_init_consumption (self):\n rd = self.comp_specs['data']\n ## total consumption\n total = rd[\"Total Consumption (MMBtu)\"] + \\\n rd[\"BEES Total Consumption (MMBtu)\"] + \\\n rd[\"Pre-Retrofit Avg Area (SF)\"] * \\\n rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH\n #~ self.baseline_total_energy_consumption = total\n HH = self.init_HH\n\n percent_accounted = 0\n\n amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0\n percent_accounted += amnt\n self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH,\n constants.mmbtu_to_gal_HF)\n amnt = np.float64(rd[\"Wood\"]) / 100.0\n percent_accounted += amnt\n self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH,\n constants.mmbtu_to_cords)\n\n amnt = np.float64(rd[\"Utility Gas\"]) / 100.0\n percent_accounted += amnt\n self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH,\n constants.mmbtu_to_Mcf)\n\n amnt = np.float64(rd[\"LP\"]) / 100.0\n percent_accounted += amnt\n self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH,\n constants.mmbtu_to_gal_LP)\n\n amnt = np.float64(rd[\"Electricity\"]) / 100.0\n percent_accounted += amnt\n self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH,\n constants.mmbtu_to_kWh)\n #~ self.init_coal\n #~ self.init_solar\n #~ self.init_other\n\n msg = str(round(percent_accounted)) + \\\n \" of residential fuel sources accounted for\"\n self.diagnostics.add_note(self.component_name, msg)", "def execute(self):\n \n # initialize input parameters\n self.hubHt = self.hub_height\n self.ratedPower = self.machine_rating\n self.maxTipSpd = self.max_tip_speed\n self.rotorDiam = self.rotor_diameter\n self.maxCp = self.max_power_coefficient\n self.maxTipSpdRatio = self.opt_tsr\n self.cutInWS = self.cut_in_wind_speed\n self.cutOutWS = self.cut_out_wind_speed\n self.altitude = self.altitude\n\n if self.air_density == 0.0: \n # Compute air density \n ssl_pa = 101300 # std sea-level pressure in Pa\n gas_const = 287.15 # gas constant for air in J/kg/K\n gravity = 9.80665 # standard gravity in m/sec/sec\n lapse_rate = 0.0065 # temp lapse rate in K/m\n ssl_temp = 288.15 # std sea-level temp in K\n \n 
air_density = (ssl_pa * (1-((lapse_rate*(self.altitude + self.hubHt))/ssl_temp))**(gravity/(lapse_rate*gas_const))) / \\\n (gas_const*(ssl_temp-lapse_rate*(self.altitude + self.hubHt)))\n else:\n \t\tair_density = self.air_density\n\n # determine power curve inputs\n self.reg2pt5slope = 0.05\n \n #self.max_efficiency = self.drivetrain.getMaxEfficiency()\n self.ratedHubPower = self.ratedPower / self.max_efficiency # RatedHubPower\n\n self.omegaM = self.maxTipSpd/(self.rotorDiam/2.) # Omega M - rated rotor speed\n omega0 = self.omegaM/(1+self.reg2pt5slope) # Omega 0 - rotor speed at which region 2 hits zero torque\n Tm = self.ratedHubPower*1000/self.omegaM # Tm - rated torque\n\n # compute rated rotor speed\n self.ratedRPM = (30./pi) * self.omegaM\n \n # compute variable-speed torque constant k\n kTorque = (air_density*pi*self.rotorDiam**5*self.maxCp)/(64*self.maxTipSpdRatio**3) # k\n \n b = -Tm/(self.omegaM-omega0) # b - quadratic formula values to determine omegaT\n c = (Tm*omega0)/(self.omegaM-omega0) # c\n \n # omegaT is rotor speed at which regions 2 and 2.5 intersect\n # add check for feasibility of omegaT calculation 09/20/2012\n omegaTflag = True\n if (b**2-4*kTorque*c) > 0:\n omegaT = -(b/(2*kTorque))-(np.sqrt(b**2-4*kTorque*c)/(2*kTorque)) # Omega T\n #print [kTorque, b, c, omegaT]\n \n windOmegaT = (omegaT*self.rotorDiam)/(2*self.maxTipSpdRatio) # Wind at omegaT (M25)\n pwrOmegaT = kTorque*omegaT**3/1000 # Power at ometaT (M26)\n\n else:\n omegaTflag = False\n windOmegaT = self.ratedRPM\n pwrOmegaT = self.ratedPower\n\n # compute rated wind speed\n d = air_density*np.pi*self.rotorDiam**2.*0.25*self.maxCp\n self.ratedWindSpeed = \\\n 0.33*( (2.*self.ratedHubPower*1000. / ( d))**(1./3.) ) + \\\n 0.67*( (((self.ratedHubPower-pwrOmegaT)*1000.) / (1.5*d*windOmegaT**2.)) + windOmegaT )\n\n # set up for idealized power curve\n n = 161 # number of wind speed bins\n itp = [None] * n\n ws_inc = 0.25 # size of wind speed bins for integrating power curve\n Wind = []\n Wval = 0.0\n Wind.append(Wval)\n for i in xrange(1,n):\n Wval += ws_inc\n Wind.append(Wval)\n\n # determine idealized power curve \n self.idealPowerCurve (Wind, itp, kTorque, windOmegaT, pwrOmegaT, n , omegaTflag)\n\n # add a fix for rated wind speed calculation inaccuracies kld 9/21/2012\n ratedWSflag = False\n # determine power curve after losses\n mtp = [None] * n\n for i in xrange(0,n):\n mtp[i] = itp[i] #* self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower)\n #print [Wind[i],itp[i],self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower),mtp[i]] # for testing\n if (mtp[i] > self.ratedPower):\n if not ratedWSflag:\n ratedWSflag = True\n mtp[i] = self.ratedPower\n\n self.rated_wind_speed = self.ratedWindSpeed\n self.rated_rotor_speed = self.ratedRPM\n self.power_curve = mtp\n self.wind_curve = Wind\n\n # compute turbine load outputs\n self.rotor_torque = self.ratedHubPower/(self.ratedRPM*(pi/30.))*1000.\n self.rotor_thrust = air_density * self.thrust_coefficient * pi * self.rotor_diameter**2 * (self.ratedWindSpeed**2) / 8.", "def calculate(self, technologies, value_streams, results, opt_years):\n self.initiate_cost_benefit_analysis(technologies, value_streams)\n super().calculate(self.ders, self.value_streams, results, opt_years)\n self.create_equipment_lifetime_report(self.ders)", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules 
###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n 
Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, 
DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 ###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n ############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, 
wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", "def solve_model(self): \n \n t0 = time.time() #start the clock\n \n # a. benchmark case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(1)\n \n #ii. set policy. in RR08 the benchmark economy has no taxes nor subsidies\n self.tau_benchmark = np.array([0, 0, 0]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(self.tau_benchmark) #set tax system\n \n #iii. 
benchmark equilibrium\n self.Yss_b, self.Kss_b, self.TFPss_b, self.average_firm_size_b, self.E_star_b, _, \\\n _, self.N_ss_b, self.w_ss_b, self.cdf_stationary_b, self.cdf_emp_b = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Benchmark Stationary Equilibrium\")\n print(\"-----------------------------------------\")\n print(f\"ss output = {self.Yss_b:.2f}\")\n print(f\"ss capital = {self.Kss_b:.2f}\")\n print(f\"ss tfp = {self.TFPss_b:.2f}\")\n print(f\"ss wage = {self.w_ss_b:.2f}\")\n print(f\"entry mass = {self.E_star_b:.3f}\")\n print(f\"avg. firm size = {self.average_firm_size_b:.2f}\")\n \n #b. plot (note that the distributions plotted here are unaffected by the distortionary policies)\n \n if self.plott:\n #i. initialize\n employed = [4.99, 49.99]\n firm_size_by_employee = np.zeros(len(employed)+1)\n share_employment = np.zeros(len(employed)+1)\n \n \n #i. percentage of firms that employ employed\n \n for i_e in range(len(employed)):\n summ = np.sum(firm_size_by_employee)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_stationary_b, employed[i_e])[0] #labor_demand_rel is labor demand with the lowest value normalized to 1\n firm_size_by_employee[i_e] = interpolate - summ\n firm_size_by_employee[-1] = 1 - np.sum(firm_size_by_employee)\n \n plt.pie(firm_size_by_employee, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Size of Firms by Firm Size (Number of Employees)')\n plt.savefig('firm_size_rr08.pdf')\n plt.show()\n \n \n #ii. employment percentage by firm size\n for i_e in range(len(employed)):\n summ = np.sum(share_employment)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_emp_b , employed[i_e])[0]\n share_employment[i_e] = interpolate - summ\n share_employment[-1] = 1 - np.sum(share_employment)\n \n plt.pie(share_employment, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Employment Share by Firm Size (Number of Employees)')\n plt.savefig('employment_by_firm_size_rr08.pdf')\n plt.show()\n \n #iii. productivity cdf and employment cdf\n plt.plot(self.grid_s, self.cdf_stationary_b)\n plt.plot(self.grid_s, self.cdf_emp_b)\n plt.title('Stationary CDF' )\n plt.xlabel('Productivity level')\n plt.ylabel('Cumulative Sum')\n plt.legend(['Firms by Productivity Level','Share of Employment'])\n plt.savefig('cdf_rr08.pdf')\n plt.show()\n \n \n \n #c. distortion case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(0)\n \n #ii. compute stationary economy for each tau\n \n for idx, tau in enumerate(self.tau_vector):\n \n #iii. find the subsidy rate that generates the same capital stock as in benchmark economy\n self.tau_s[idx] = self.find_subsidy_rate(tau)\n \n # set tax system with newly found tau_s and given tau\n tauv = np.array([-self.tau_s[idx], self.excempt_frac, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv) #set tax system\n \n #v. 
distorted stationary equilibrium\n self.Yss_d[idx], self.Kss_d[idx], self.TFPss_d[idx], self.average_firm_size_d[idx], self.E_star_d[idx], \\\n self.Y_set_d[idx,:], self.subsidy_size_d[idx], self.N_ss_d[idx], self.w_ss_d[idx],\\\n _, _ = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Distorted Stationary Equilibrium\")\n print(\"-----------------------------------------\\n\")\n if self.distortion_case == 1:\n print(\"Tax/Subidy Uncorrelated with Firm Level Producitivity\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Negatively Correlated with Firm Level Producitivity\")\n print(\"(low productivity firms recieve subsidy, high productivity taxed)\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Positively Correlated with Firm Level Producitivity\")\n print(\"(high productivity firms recieve subsidy, low productivity taxed)\\n\")\n if self.policy_type == 1 :\n print(\"Tax Type: Tax on output\\n\")\n elif self.policy_type == 2 :\n print(\"Tax Type: Tax on capital\\n\")\n elif self.policy_type == 3 :\n print(\"Tax Type: Tax on labor\\n\")\n print(f\"fraction of firms recieving subsidy = {self.subsidy_frac:.2f}\")\n print(f\"fraction of firms taxed = {1-self.subsidy_frac-self.excempt_frac:.2f}\")\n print(f\"fraction of firms excempt = {self.excempt_frac:.2f}\")\n print(\"-----------------------------------------\\n\")\n \n print(tabulate([['relative Yss', round(self.Yss_d[0]/self.Yss_b, 2), round(self.Yss_d[1]/self.Yss_b, 2), round(self.Yss_d[2]/self.Yss_b, 2), round(self.Yss_d[3]/self.Yss_b, 2)],\n ['relative TFPss', round(self.TFPss_d[0]/self.TFPss_b, 2), round(self.TFPss_d[1]/self.TFPss_b, 2), round(self.TFPss_d[2]/self.TFPss_b, 2), round(self.TFPss_d[3]/self.TFPss_b, 2)], \n ['relative entry mass', round(self.E_star_d[0]/self.E_star_b, 2), round(self.E_star_d[1]/self.E_star_b, 2), round(self.E_star_d[2]/self.E_star_b, 2), round(self.E_star_d[3]/self.E_star_b, 2)],\n ['share of subsidized output', round(self.Y_set_d[0,0], 2), round(self.Y_set_d[1,0], 2), round(self.Y_set_d[2,0], 2), round(self.Y_set_d[3,0], 2)],\n ['total subsidy paid of output', round(self.subsidy_size_d[0], 2), round(self.subsidy_size_d[1], 2), round(self.subsidy_size_d[2], 2), round(self.subsidy_size_d[3], 2)],\n ['subsidy rate (tau_s)', round(self.tau_s[0], 2), round(self.tau_s[1], 2), round(self.tau_s[2], 2), round(self.tau_s[3], 2)],\n [], \n ['relative Kss', round(self.Kss_d[0]/self.Kss_b, 2), round(self.Kss_d[1]/self.Kss_b, 2), round(self.Kss_d[2]/self.Kss_b, 2), round(self.Kss_d[3]/self.Kss_b, 2)], \n ['relative wss', round(self.w_ss_d[0]/self.w_ss_b, 2), round(self.w_ss_d[1]/self.w_ss_b, 2), round(self.w_ss_d[2]/self.w_ss_b, 2), round(self.w_ss_d[3]/self.w_ss_b, 2)], \n ['relative Nss', round(self.N_ss_d[0]/self.N_ss_b, 2), round(self.N_ss_d[1]/self.N_ss_b, 2), round(self.N_ss_d[2]/self.N_ss_b, 2), round(self.N_ss_d[3]/self.N_ss_b, 2)], \n ['relative avg. firm size', round(self.average_firm_size_d[0]/self.average_firm_size_b, 2), round(self.average_firm_size_d[1]/self.average_firm_size_b, 2), round(self.average_firm_size_d[2]/self.average_firm_size_b, 2), round(self.average_firm_size_d[3]/self.average_firm_size_b, 2)]],\n headers=['Variable', 'Tax = '+str(self.tau_vector[0]), \"Tax = \"+str(self.tau_vector[1]), 'Tax = '+str(self.tau_vector[2]), 'Tax = '+str(self.tau_vector[3])]))\n \n\n t1 = time.time()\n print(f'\\nTotal Run Time: {t1-t0:.2f} seconds')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Average Diesel load of the current system Attributes
def calc_average_load (self):
    #~ self.generation = self.forecast.generation_by_type['generation diesel']\
        [self.start_year]
    self.average_load = \
        self.forecast.yearly_average_diesel_load.ix[self.start_year]
[ "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def load_average():\n\n return {\"load_avg\" : os.getloadavg() }", "def avg_load(self):\r\n # Sum the load of each server and divide by the amount of servers\r\n total = 0\r\n count = 0\r\n for serv in self.servers:\r\n total += serv.load()\r\n count += 1\r\n avg = total / count\r\n return avg", "def load_average():\n try:\n cpu_load_avg = _proc_stat_cpu_load_average()\n except OSError:\n cpu_load_avg = None\n return cpu_load_avg", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)", "def darwin_readLoadAvg(self):\n try:\n loadAvg = os.popen('sysctl vm.loadavg')\n line = loadAvg.readline()\n loadAvg.close()\n elem = re.split(r\"\\s+\", line)\n self.data['load1'] = float(elem[1])\n self.data['load5'] = float(elem[2])\n self.data['load15'] = float(elem[3])\n except IOError as ex:\n del ex\n self.logger.log(Logger.ERROR, \"ProcInfo: cannot run 'sysctl vm.loadavg\")\n return", "def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")", "def readLoadAvg(self):\n try:\n with open('/proc/loadavg') as fd:\n line = fd.readline()\n elem = re.split(r\"\\s+\", line)\n self.data['load1'] = float(elem[0])\n self.data['load5'] = float(elem[1])\n self.data['load15'] = float(elem[2])\n except IOError as ex:\n del ex\n self.logger.log(Logger.ERROR, \"ProcInfo: cannot open /proc/meminfo\")\n return", "def _get_cpu_load_average_fifteen_min(self):\n return self.__cpu_load_average_fifteen_min", "def _get_cpu_load_average_five_min(self):\n return self.__cpu_load_average_five_min", "def _get_cpu_load_average_one_min(self):\n return self.__cpu_load_average_one_min", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def ensemble_average_energy(self):", "def get_avg_cpu_utilization_percentage_for_environment(config):\n logger = logging.getLogger(__name__)\n instances = aws_ec2.get_running_instances(config)\n cw = boto.ec2.cloudwatch.connect_to_region(config.get('region'))\n env_metric = EnvMetric()\n for instance in instances:\n list_metrics = cw.list_metrics(dimensions={'InstanceId': instance.id}, metric_name='CPUUtilization')\n #Newly added instances do not have recorded data, thus the query returns an empty list\n if len(list_metrics) > 0:\n inst_metric = InstanceMetric(instance,list_metrics[0])\n start,end = get_start_end_statistics_time(config)\n inst_metric.query = list_metrics[0].query(start, end, ['Average'])\n percent, num = inst_metric.average_percentage()\n rec = str(inst_metric.metric_records())\n logger.info('%s: CPU %.0f%% for %d min. 
%s' %(inst_metric.instance.id, percent, num,rec))\n env_metric.instance_metrics.append(inst_metric)\n now = str(time.time()).split('.')[0]\n now_human = str(datetime.datetime.now())\n percent, num = env_metric.get_average_percentage()\n data = '%s, %s, %.2f, %d, %d' %(now_human, now, percent, len(config.get_list('instances')), len(config.get_list('stopped_instances')))\n logger.info(data)\n print(data)\n return env_metric", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? # <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def calculate_average(self):\n queryset = Eventtime.objects.filter(\n user=self.user).values('sleep_data')\n count = 0\n sums = 0\n for i in queryset:\n time = i['sleep_data'].split(\" \")\n count += 1\n if time[0] == \"\":\n return 0\n else:\n sums += float(time[0])\n if count == 0:\n return 0\n return sums/count", "def _find_average_age():\r\n count, total = 0, 0\r\n for resource in resources:\r\n patient = resource[\"resource\"]\r\n if \"birthDate\" in patient:\r\n count += 1\r\n dob = patient[\"birthDate\"].split(\"-\")\r\n dob = datetime(int(dob[0]), int(dob[1]), int(dob[2]), 0, 0, 0, 0)\r\n if \"deceasedDateTime\" in patient:\r\n death_time = patient[\"deceasedDateTime\"].split(\"T\")[0].split(\r\n \"-\")\r\n death_time = datetime(int(death_time[0]), int(death_time[1]),\r\n int(death_time[2]), 0, 0, 0, 0)\r\n else:\r\n death_time = datetime.now()\r\n age = relativedelta(death_time, dob).years\r\n total += age\r\n if count == 0:\r\n return count, count\r\n return total / count, count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the generation offset by connecting a transmission line to the community to connect to. Attributes
def calc_intertie_offset_generation (self):
    self.generation = \
        self.forecast.get_generation(self.start_year,self.end_year)
    dist = self.comp_specs['distance to community']
    self.annual_transmission_loss = \
        1 - (
            (1- (self.comp_specs['transmission loss per mile']/ 100.0))
            ** dist)
    self.intertie_offset_generation = \
        self.generation * (1 + self.annual_transmission_loss)

    gen_eff = self.intertie_generation_efficiency
    self.intertie_offset_generation_fuel_used = \
        self.intertie_offset_generation / gen_eff
    #~ print 'self.proposed_generation',self.proposed_generation
    #~ print con
[ "def calculate_and_set_propagation_distances(self):\n\n self.l_edge = self.calculate_distance_edge()\n self.l_int = self.calculate_distance_interaction()", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def offset(self):\n if self.valid:\n if self.type == 4:\n return ipv4_to_dec(self.addr) - ipv4_to_dec(self.network)\n else:\n return ipv6_to_dec(self.addr) - ipv6_to_dec(self.network)", "def compute_propagation_delay(self):\n d = self.distance / 2e8\n return d", "def annotation_offset(self):\n\n return self._seq.offset + self._seq.start", "def lidar_relative(self):\n return self.distance", "def get_shapeOffset(self):\n try:\n _str_func = ' get_shapeOffset'.format(self)\n log.debug(\"|{0}| >> ... 
[{1}]\".format(_str_func,self)+ '-'*80)\n \n ml_check = self.getBlockParents()\n ml_check.insert(0,self)\n \n for mBlock in ml_check:\n l_attrs = ['controlOffset','skinOffset']\n for a in l_attrs:\n if mBlock.hasAttr(a):\n v = mBlock.getMayaAttr(a)\n log.debug(\"|{0}| >> {1} attr found on rigBlock: {2} | {3}\".format(_str_func,a,v,mBlock.mNode)) \n return v \n return 1\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err,msg=vars())", "def get_offset(self, row):\n y_off = row * self.and2.height\n if row % 2:\n y_off += self.and2.height\n mirror=\"MX\"\n else:\n mirror=\"R0\"\n\n return (y_off, mirror)", "def align_xy_lineofnodes(self, galaxy) :\n self._mat_lon_NE = galaxy._mat_lon.dot(self._mat_NE)\n self.X_lon, self.Y_lon = self.rotate(matrix=self._mat_lon_NE)", "def _build_line(self):\n # Build the line position (consecutive segments):\n nnz_x, nnz_y = np.where(~self._edges.mask)\n indices = np.c_[nnz_x, nnz_y].flatten()\n line_pos = self._pos[indices, :]\n\n # Color either edges or nodes :\n if self._color_by == 'strength':\n nnz_values = self._edges.compressed()\n values = np.c_[nnz_values, nnz_values].flatten()\n elif self._color_by == 'count':\n node_count = Counter(np.ravel([nnz_x, nnz_y]))\n values = np.array([node_count[k] for k in indices])\n self._minmax = (values.min(), values.max())\n if self._clim is None:\n self._clim = self._minmax\n\n # Get the color according to values :\n if isinstance(self._custom_colors, dict): # custom color\n if None in list(self._custom_colors.keys()): # {None : 'color'}\n color = color2vb(self._custom_colors[None], length=len(values))\n else: # black by default\n color = np.zeros((len(values), 4), dtype=np.float32)\n for val, col in self._custom_colors.items():\n color[values == val, :] = color2vb(col)\n else:\n color = array2colormap(values) #, **self.to_kwargs())\n color[:, -1] = self._alpha\n\n # Dynamic color :\n if self._dynamic is not None:\n color[:, 3] = normalize(values.copy(), tomin=self._dynamic[0],\n tomax=self._dynamic[1])\n\n # Send data to the connectivity object :\n self._connect.set_data(pos=line_pos, color=color)", "def _position_to_offset(self, position: Position) -> int:\n return self._line_offsets[position.line] + position.character", "def get_relative_offset(self):\n\n\t\treturn self.get_offset_1()", "def update_offset(self):\n self.offset = self.random_generator.random()", "def get_line_position(self):\r\n # get each line sensor value\r\n voltages = [\r\n self.adc.read_voltage(pin)\r\n for pin\r\n in range(1, 9)\r\n ]\r\n line_position = sum(\r\n [\r\n voltages[i] * (i - 3.5)\r\n for i\r\n in range(8)\r\n ]\r\n ) / sum(voltages)\r\n\r\n return line_position", "def assign_lengths(G):\r\n for u, v, d in G.edges(data=True):\r\n posA = nx.get_node_attributes(G, 'pos')[u]\r\n posB = nx.get_node_attributes(G, 'pos')[v]\r\n\r\n dist = np.linalg.norm(np.subtract(posA, posB))\r\n d['distance'] = dist\r\n return G", "def relative_rate(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_relative_rate(self)", "def calculate_distance_line(\n r_packet, comov_nu, is_last_line, nu_line, time_explosion\n):\n\n nu = r_packet.nu\n\n if is_last_line:\n return MISS_DISTANCE\n\n nu_diff = comov_nu - nu_line\n\n # for numerical reasons, if line is too close, we set the distance to 0.\n if r_packet.is_close_line:\n nu_diff = 0.0\n r_packet.is_close_line = False\n\n if nu_diff >= 0:\n distance = (nu_diff / nu) * C_SPEED_OF_LIGHT * time_explosion\n else:\n print(\"WARNING: nu difference is less than 0.0\")\n raise MonteCarloException(\n \"nu 
difference is less than 0.0; for more\"\n \" information, see print statement beforehand\"\n )\n\n if numba_config.ENABLE_FULL_RELATIVITY:\n return calculate_distance_line_full_relativity(\n nu_line, nu, time_explosion, r_packet\n )\n return distance", "def line_cost(self):\n\n return self.line_length * self.line_cost_rate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the heat recovery
def calc_lost_heat_recovery (self):
    if not self.cd['heat recovery operational']:
        self.lost_heat_recovery = [0]
    else:
        gen_eff = self.cd["diesel generation efficiency"]
        self.lost_heat_recovery = \
            (self.generation / gen_eff )* .10
[ "def _calculate_heat(self, traj):\n \n pass", "def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def get_specific_heat() -> float:\n return 1006.0", "def heat_func(self):\n return self.Q.val - self.inl[0].m.val_SI * (\n self.inl[0].h.val_SI - self.outl[0].h.val_SI)", "def heat_transfer(self, T_steel):\n\n kair = 0.02587 # W / (m K)\n rhoair = 1.204 # kg / m^3\n muair = 1.813e-5 # kg/m*s\n Cpair = 1006 # J/K*kg\n beta = 3.43e-3 # 1/K (Coefficient of thermal expansion)\n g = 9.80665 # m/s2\n T0 = 20\n Da = self.Autoclave.D_Out\n\n\n Gr_a = g * beta * Da ** 3 * rhoair ** 2 * (T_steel - T0) / (muair ** 2) # Grashof number, free flow\n Pr_a = Cpair * muair / kair # Prandtl number, free flow\n Ra_a = Gr_a * Pr_a\n\n Nu = (0.60 + (0.387 * Ra_a ** (1 / 6)) / (1 + (0.0559 / Pr_a) ** (9 / 16)) ** (8 / 27)) ** 2\n h = Nu * kair / Da\n\n #\n #\n # rho = 1.225\n # D = self.Autoclave.D_Out\n # mu_0 = 1.81 * 10 ** (-5)\n # T_0 = self.T0\n # mu = mu_0 * (T / T_0) ** 1.5 * (T_0 + 104.7) / (T + 104.7)\n # k = 0.029\n # C = 0.53\n # N = 0.25\n # alpha = k/rho/self.Gas.Cp\n # beta = 3.41\n # g = 9.81\n # h = k/D*C*((g*beta*D**3*abs(T-T_0)/(mu/rho)**2)*(mu/rho/alpha))**N\n\n return h", "def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t", "def soil_heat_flux(Rn, Rs):\r\n threshold = 0.05\r\n \r\n G = 0.1*Rn # daylight\r\n G[Rs < threshold] = 0.5*Rn[Rs < threshold] # nighttime\r\n \r\n return G", "def calHeatMap(self, newhp):\n heatmap = newhp.copy()\n discount = 0.7\n scale = 1.0*discount\n N = len(self.heatmaps)\n Knorm = 1.0\n for i in range(N-1, -1, -1):\n heatmap = heatmap + scale*self.heatmaps[i]\n Knorm = Knorm+scale\n scale = scale*discount\n return heatmap/Knorm", "def conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. 
https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def heatrate(self):\n return self.power - self.lumens / self.spectrum.lumens_per_watts", "def heat_of_comb(self,T):\n hreac = float(0)\n for reac in self.reactants:\n hreac += reac[0].ho(T)*reac[1]\n \n hprod = float(0)\n for prod in self.products:\n hprod += prod[0].ho(T)*prod[1]\n\n return float(hreac-hprod)/self.fuels.mm", "def _compute_temperature(self):\n return 4.0 / np.log(1.0 + 4.0 / (self.demon_energy_accum / (self.mcs * self.N ** 2)))", "def load_heatdiag(self, **kwargs):\n read_rz = kwargs.get('read_rz',True) #read heat load in RZ\n\n self.hl=[]\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",0,read_rz) ) #actual reading routine\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",1,read_rz) )#actual reading routine\n\n for i in [0,1] :\n try:\n self.hl[i].psin=self.hl[i].psi[0,:]/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psin\")\n\n #read bfieldm data if available\n self.load_bfieldm()\n\n #dt=self.unit_dic['sml_dt']*self.unit_dic['diag_1d_period']\n wedge_n=self.unit_dic['sml_wedge_n']\n for i in [0,1]:\n dpsin=self.hl[i].psin[1]-self.hl[i].psin[0] #equal dist\n #ds = dR* 2 * pi * R / wedge_n\n ds=dpsin/self.bfm.dpndrs* 2 * 3.141592 * self.bfm.r0 /wedge_n #R0 at axis is used. should I use Rs?\n self.hl[i].rmid=np.interp(self.hl[i].psin,self.bfm.psino,self.bfm.rmido)\n self.hl[i].post_heatdiag(ds)\n self.hl[i].total_heat(wedge_n)", "def test_calculate_specific_heat(self):\n expected = np.array([1089.5, 1174.0, 1258.5], dtype=np.float32)\n result = WetBulbTemperature()._calculate_specific_heat(self.mixing_ratio)\n self.assertArrayAlmostEqual(result, expected, decimal=2)", "def heat_balance(index):\n t = index[0]\n return (\n heat_hru_out[t]\n + pulp.lpSum([component_output[i, t] for i in index_heat_out])\n - pulp.lpSum([component_input[i, t] for i in index_heat_in])\n + heat_unserve[t]\n - heat_dump[t]\n == forecast[\"heat_load\"][t]\n )", "def compute_energy(img):\r\n # urmati urmatorii pasi:\r\n # 1. transformati imagine in grayscale\r\n # 2. folositi filtru sobel pentru a calcula gradientul in directia X si Y\r\n # 3. 
calculati magnitudinea imaginii\r\n\r\n img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n #de cautat totusi si codul pt SOBEL pe net\r\n grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n#E repr gradientii aka cat se sch un pixel de la unul la altul\r\n E = abs(grad_x) + abs(grad_y)\r\n # print(grad_y)\r\n # print(grad_x)\r\n\r\n cv.imwrite(\"poza.jpg\", E)\r\n return E", "def diffusive_heat_flux(discr, eos, cv, j):\n if isinstance(eos, MixtureEOS):\n h_alpha = eos.species_enthalpies(cv)\n return sum(h_alpha.reshape(-1, 1) * j)\n return 0", "def thermalization_analysis():\n\n verbose = True\n run_pre_analysis = True\n mark_every = 50\n mc_cutoff = -1 # Skip every 100 points with 2000 therm-steps!!\n batch_folder = check_relative_path(\"data/thermalization_data\")\n base_figure_folder = check_relative_path(\"figures/\")\n base_figure_folder = os.path.join(base_figure_folder,\n \"thermalization_analysis\")\n check_folder(base_figure_folder, verbose=verbose)\n\n default_params = get_default_parameters(\n data_batch_folder=\"temp\", include_euclidean_time_obs=False)\n\n ############ COLD START #############\n cold_beta60_params = copy.deepcopy(default_params)\n cold_beta60_params[\"batch_folder\"] = batch_folder\n cold_beta60_params[\"batch_name\"] = \"B60_THERM_COLD\"\n cold_beta60_params[\"load_binary_file\"] = False\n cold_beta60_params[\"beta\"] = 6.0\n cold_beta60_params[\"topc_y_limits\"] = [-2, 2]\n cold_beta60_params[\"num_bins_per_int\"] = 32\n cold_beta60_params[\"bin_range\"] = [-2.5, 2.5]\n cold_beta60_params[\"hist_flow_times\"] = [0, 250, 600]\n cold_beta60_params[\"NCfgs\"] = get_num_observables(\n cold_beta60_params[\"batch_folder\"],\n cold_beta60_params[\"batch_name\"])\n cold_beta60_params[\"obs_file\"] = \"8_6.00\"\n cold_beta60_params[\"N\"] = 8\n cold_beta60_params[\"NT\"] = 16\n cold_beta60_params[\"color\"] = \"#377eb8\"\n\n ########## HOT RND START ############\n hot_rnd_beta60_params = copy.deepcopy(default_params)\n hot_rnd_beta60_params[\"batch_folder\"] = batch_folder\n hot_rnd_beta60_params[\"batch_name\"] = \"B60_THERM_HOT_RND\"\n\n ########## HOT RST START ############\n hot_rst_beta60_params = copy.deepcopy(default_params)\n hot_rst_beta60_params[\"batch_folder\"] = batch_folder\n hot_rst_beta60_params[\"batch_name\"] = \"B60_THERM_HOT_RST\"\n\n if run_pre_analysis:\n # Submitting distribution analysis\n cold_data = load_observable(cold_beta60_params)\n hot_rnd_data = load_observable(hot_rnd_beta60_params)\n hot_rst_data = load_observable(hot_rst_beta60_params)\n\n # # Loads post analysis data\n # cold_data = post_analysis.PostAnalysisDataReader(\n # [cold_beta60_params],\n # observables_to_load=cold_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # hot_rnd_data = post_analysis.PostAnalysisDataReader(\n # [hot_rnd_beta60_params],\n # observables_to_load=hot_rnd_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # hot_rst_data = post_analysis.PostAnalysisDataReader(\n # [hot_rst_beta60_params],\n # observables_to_load=hot_rst_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # TODO: plot termaliations for the 3 different observables\n\n plot_types = [\"default\", \"loglog\", \"logx\", \"logy\"]\n\n y_labels = [\n [r\"$P$\", r\"$Q$\", r\"$E$\"],\n [r\"$\\frac{|P - \\langle P \\rangle|}{\\langle P \\rangle}$\", \n r\"$\\frac{|Q - \\langle Q \\rangle|}{\\langle Q 
\\rangle}$\",\n r\"$\\frac{|E - \\langle E \\rangle|}{\\langle E \\rangle}$\"],\n [r\"$|P - \\langle P \\rangle|$\", r\"$|Q - \\langle Q \\rangle|$\",\n r\"$|E - \\langle E \\rangle|$\"]]\n # y_labels[i_dr] = [r\"$\\langle P \\rangle$\", r\"$\\langle P \\rangle$\",\n # r\"$\\langle P \\rangle$\"]\n\n subplot_rows = [1, 3]\n\n # Limits to be put on plot\n x_limits = [[] for i in range(3)]\n y_limits = [[], [], []]\n\n data_representations = [\"default\", \"relerr\", \"abserr\"]\n\n obs_list = cold_data[\"obs\"].keys()\n\n x_label = r\"$t_\\mathrm{MC}$\"\n\n for i_dr, dr in enumerate(data_representations):\n for pt in plot_types:\n for i_obs, obs in enumerate(obs_list):\n for plot_rows in subplot_rows:\n\n # Sets up figure folder for observable\n figure_folder = os.path.join(base_figure_folder, obs)\n check_folder(figure_folder, verbose=verbose)\n\n # Sets up plot type folder \n figure_folder = os.path.join(figure_folder, pt)\n check_folder(figure_folder, verbose=verbose)\n\n if obs == \"energy\":\n correction_factor = - 1.0 / 64\n cold_data[\"obs\"][obs] *= correction_factor\n hot_rnd_data[\"obs\"][obs] *= correction_factor\n hot_rst_data[\"obs\"][obs] *= correction_factor\n\n # Retrieves data and makes modifications\n _cold_data = modify_data(\n cold_data[\"obs\"][obs][:mc_cutoff], dr)\n _hot_rnd_data = modify_data(\n hot_rnd_data[\"obs\"][obs][:mc_cutoff], dr)\n _hot_rst_data = modify_data(\n hot_rst_data[\"obs\"][obs][:mc_cutoff], dr)\n\n # Creates figure name\n figure_name = \"{0:s}_{1:s}_{2:s}_{3:d}plotrows.pdf\".format(\n obs, pt, dr, plot_rows)\n\n plot_data_array([np.arange(_cold_data.shape[0])\n for i in range(3)],\n [_cold_data, _hot_rnd_data,\n _hot_rst_data],\n [\"Cold start\", \"Hot start\",\n r\"Hot start, $RST$\"],\n x_label,\n y_labels[i_dr][i_obs],\n figure_name,\n figure_folder,\n plot_type=pt,\n x_limits=x_limits[i_obs],\n y_limits=y_limits[i_obs],\n mark_every=mark_every,\n subplot_rows=plot_rows)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the capital costs. Attributes
def calc_capital_costs (self):
    road_needed = 'road needed'
    if self.cd['on road system']:
        road_needed = 'road not needed'
    dist = self.comp_specs['distance to community']
    self.capital_costs = self.comp_specs['est. intertie cost per mile']\
        [road_needed] * dist
    #~ print self.capital_costs
[ "def calc_capital_costs (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" calculate self.capital_costs(the cost of the project) a dollar value\"", "def calc_capital_costs (self):\n self.capital_costs = self.opportunity_HH * self.refit_cost_rate", "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs", "def get_capital_costs (self): # ex: eff(res) G55-V55\n try:\n return self.annual_costs\n except:\n return self.get_nan_range()", "def determine_cost(self):\n pass", "def capital_cost(self, supply_number):\n cost = 0\n for _name, supply_type in NodeType.__members__.items():\n cost += supply_number[supply_type] * self.supplyCapital[supply_type]\n return cost", "def cost(self) -> float:", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs", "def calc_maintenance_cost(self):\n\n self.maintenance_cost = self.capital_costs * .01", "def getCosts(self):\n return self.costs", "def get_capital_gains(self):\n return self.capital_gains", "def _load_costs(self):\n F_BM = self.F_BM\n F_D = self.F_D\n F_P = self.F_P\n F_M = self.F_M\n baseline_purchase_costs = self.baseline_purchase_costs\n purchase_costs = self.purchase_costs\n installed_costs = self.installed_costs\n \n # Load main costs\n for i in purchase_costs:\n if i not in baseline_purchase_costs:\n baseline_purchase_costs[i] = purchase_costs[i]\n for name, Cpb in baseline_purchase_costs.items(): \n if name in installed_costs and name in purchase_costs:\n continue # Assume costs already added elsewhere using another method\n F = F_D.get(name, 1.) * F_P.get(name, 1.) 
* F_M.get(name, 1.)\n try:\n installed_costs[name] = Cpb * (F_BM[name] + F - 1.)\n except KeyError:\n F_BM[name] = 1.\n installed_costs[name] = purchase_costs[name] = Cpb * F\n else:\n purchase_costs[name] = Cpb * F", "def get_costs(self):\n return [self.intensity_cost, self.adjustment_cost, self.duration_cost]", "def total_cost(self):\n\t\tif self.tran_type == 'b':\n\t\t\treturn - self.shares * self.price - self.commission\n\t\telse:\n\t\t\treturn self.shares * self.price - self.commission", "def update_capital_stats(self):\n short_capital = 0\n long_capital = 0\n\n for pos in (self.active_long_positions + self.active_short_positions):\n\n if pos.order_type == Consts.LONG:\n long_capital += pos.get_current_liquid_capital()\n else:\n short_capital += pos.get_current_liquid_capital()\n\n self.short_capital = short_capital\n self.long_capital = long_capital", "def compute_costs(self, car):\n self.compute_price(car)\n self.compute_commission()", "def costs(self):\n return self._costs", "def _set_costs(self):\n plant_size_kw = (self.sam_sys_inputs[\"resource_potential\"]\n / self._RESOURCE_POTENTIAL_MULT) * 1000\n\n cc_per_kw = self.sam_sys_inputs.pop(\"capital_cost_per_kw\", None)\n if cc_per_kw is not None:\n capital_cost = cc_per_kw * plant_size_kw\n logger.debug(\"Setting the capital_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost\n\n dc_per_well = self.sam_sys_inputs.pop(\"drill_cost_per_well\", None)\n num_wells = self.sam_sys_inputs.pop(\"prod_and_inj_wells_to_drill\",\n None)\n if dc_per_well is not None:\n if num_wells is None:\n msg = ('Could not determine number of wells to be drilled. '\n 'No drilling costs added!')\n logger.warning(msg)\n warn(msg)\n else:\n capital_cost = self.sam_sys_inputs[\"capital_cost\"]\n drill_cost = dc_per_well * num_wells\n logger.debug(\"Setting the drilling cost to ${:,.2f} \"\n \"({:.2f} wells at ${:,.2f} per well)\"\n .format(drill_cost, num_wells, dc_per_well))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost + drill_cost\n\n foc_per_kw = self.sam_sys_inputs.pop(\"fixed_operating_cost_per_kw\",\n None)\n if foc_per_kw is not None:\n fixed_operating_cost = foc_per_kw * plant_size_kw\n logger.debug(\"Setting the fixed_operating_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"fixed_operating_cost\"] = fixed_operating_cost" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate annual electric savings created by the project. Attributes
def calc_annual_electric_savings (self):
    costs = self.comp_specs['diesel generator o&m']

    for kW in costs.keys():
        try:
            if self.average_load < int(kW):
                maintenance = self.comp_specs['diesel generator o&m'][kW]
                break
        except ValueError:
            maintenance = self.comp_specs['diesel generator o&m'][kW]

    self.baseline_generation_cost = maintenance + \
        (self.pre_intertie_generation_fuel_used * self.diesel_prices)

    maintenance = self.capital_costs * \
        (self.comp_specs['percent o&m'] / 100.0)
    self.proposed_generation_cost = maintenance + \
        self.intertie_offset_generation_fuel_used * \
        self.intertie_diesel_prices
    self.annual_electric_savings = self.baseline_generation_cost -\
        self.proposed_generation_cost
    #~ print len(self.annual_electric_savings)
    #~ print 'self.annual_electric_savings',self.annual_electric_savings
[ "def calc_annual_electric_savings (self):\n self.annual_electric_savings = np.zeros(self.project_life)", "def calc_annual_electric_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_electric_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_heating_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_heating_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"", "def get_total_savings_costs (self): # ex: eff(res) G59-V59\n try:\n return self.annual_total_savings\n except:\n return self.get_nan_range()", "def calc_annual_heating_savings (self):\n self.annual_heating_savings = self.baseline_HF_cost - \\\n self.proposed_HF_cost", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_heating_savings (self):\n price = self.diesel_prices + self.cd['heating fuel premium']\n maintenance = self.comp_specs['heat recovery o&m']\n self.annual_heating_savings = -1 * \\\n (maintenance + (self.lost_heat_recovery * price))", "def calc_savings_opportunities (self):\n rd = self.comp_specs['data']\n ## #HH\n self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"]\n self.opportunity_HH = np.float64( self.opportunity_HH )\n #~ print self.opportunity_HH\n if self.opportunity_HH < 0:\n self.opportunity_HH = 0\n self.diagnostics.add_note(self.component_name,\n \"calculate Houses to retrofit was negative, setting to 0\" )\n\n ## % as decimal\n #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"]\n #~ self.percent_savings = np.float64( self.percent_savings)\n area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])\n EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])\n avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. 
EUI Reduction\"])\n\n total = area * EUI\n\n\n # the one in each of these function calls is an identity\n amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0\n self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_HF)\n\n amnt = np.float64(rd[\"Wood\"]) / 100.0\n self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_cords)\n\n amnt = np.float64(rd[\"Utility Gas\"]) / 100.0\n self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_Mcf)\n\n amnt = np.float64(rd[\"LP\"]) / 100.0\n self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_LP)\n\n amnt = np.float64(rd[\"Electricity\"]) / 100.0\n self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_kWh)\n #~ self.savings_coal\n #~ self.savings_solar\n #~ self.savings_other\n\n self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\\n self.savings_wood * (1/constants.mmbtu_to_cords) +\\\n self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\\n self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\\n self.savings_LP* (1/constants.mmbtu_to_gal_LP)", "def calcAvg(self):\n\n avg = (self.project + self.midterm + self.final) / 3\n return avg", "def investment_calc(self, investment):\n \n \n #muni basis each year is the muni_amount + all prior year's interest\n self.muni_appreciated = round(investment[0] * (1+self.muni_roi),2)\n self.equity_appreciated = round(investment[1] * (1+self.equity_roi),2)\n \n #Interest and Dividends, pretax\n self.pretax_interest = round(investment[0]*self.muni_int,2)\n self.pretax_dividends = round(investment[1]*self.equity_div,2)\n \n muni_percent = self.pretax_interest/(self.pretax_interest+self.pretax_dividends)\n \n #Corp Tax Schedule\n if self.pretax_dividends+self.pretax_interest <= 50000:\n self.muni_int_earned = round(self.pretax_interest*(1-15/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-15/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 50000) & (self.pretax_dividends+self.pretax_interest <= 75000):\n #Will split the amount proportionally.\n self.muni_int_earned = round((self.pretax_interest-(7500*muni_percent))*(1-25/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(7500*(1-muni_percent)))*(1-25/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 75000) & (self.pretax_dividends+self.pretax_interest <= 100000):\n self.muni_int_earned = round((self.pretax_interest-(13750*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(13750*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 100000) & (self.pretax_dividends+self.pretax_interest <= 335000):\n self.muni_int_earned = round((self.pretax_interest-(22250*muni_percent))*(1-39/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(22250*(1-muni_percent)))*(1-39/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 335000) & (self.pretax_dividends+self.pretax_interest <= 10000000):\n self.muni_int_earned = round((self.pretax_interest-(113900*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(113900*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 10000000) & 
(self.pretax_dividends+self.pretax_interest <= 15000000):\n self.muni_int_earned = round((self.pretax_interest-(3400000*muni_percent))*(1-35/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(3400000*(1-muni_percent)))*(1-35/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 15000000) & (self.pretax_dividends+self.pretax_interest <= 18333333):\n self.muni_int_earned = round((self.pretax_interest-(5150000*muni_percent))*(1-38/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(5150000*(1-muni_percent)))*(1-38/100),2)\n \n else:\n self.muni_int_earned = round(self.pretax_interest*(1-35/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-35/100),2)\n return (self.muni_appreciated, self.muni_int_earned, self.equity_appreciated, self.equity_div_earned)", "def total_returns(self, years_inv = 10):\n #first calculate what your returns are over the investment only period.\n reserve = []\n muni_bases = []\n muni_ending = []\n muni_cap_appr = []\n muni_interest = []\n equity_bases = []\n equity_ending = []\n equity_cap_appr = []\n equity_div = []\n inv_year = []\n for start_year in range(0, years_inv):\n if start_year == 0:\n reserve.append(round(self.reserve_fund*1.01,2))\n ending_muni, interest, ending_equity, dividends = self.investment_calc([self.muni_yr1,self.eq_yr1])\n #munis\n muni_bases.append(self.muni_yr1)\n muni_ending.append(ending_muni)\n muni_cap_appr.append(ending_muni - self.muni_yr1)\n muni_interest.append(interest)\n #equities\n equity_bases.append(self.eq_yr1)\n equity_ending.append(ending_equity)\n equity_cap_appr.append(ending_equity-self.eq_yr1)\n equity_div.append(dividends)\n inv_year.append(start_year)\n else:\n #compounding the ending amount from the prior year plus the interest earned in the last year\n reserve.append(round(reserve.copy()[-1]*1.01,2))\n ending_muni, interest, ending_equity, dividends = self.investment_calc([muni_ending[start_year-1]+muni_interest[start_year-1]+self.muni_amt,equity_ending[start_year-1]+ equity_div[start_year-1]+self.equity_amt])\n muni_bases.append(self.muni_yr1 + self.muni_amt*(start_year) + sum(muni_interest.copy()))\n muni_ending.append(ending_muni)\n muni_cap_appr.append(ending_muni - muni_bases.copy()[start_year])\n muni_interest.append(interest)\n #equities\n equity_bases.append(self.eq_yr1 + self.equity_amt*(start_year) + sum(equity_div.copy()))\n equity_ending.append(ending_equity)\n equity_cap_appr.append(ending_equity-equity_bases.copy()[start_year])\n equity_div.append(dividends)\n inv_year.append(start_year)\n \n \n #bases needs to show the bases for each year.\n #bases would be 260K in year 0, 260K + 260K + last year's int in year 1, \n self.total_df = pd.DataFrame([inv_year,reserve, muni_bases, muni_ending, muni_cap_appr, muni_interest, equity_bases, equity_ending, equity_cap_appr, equity_div]).T.rename(columns = {0: 'Starting Year', 1:'reserve', 2:'muni_cost', 3:'muni_end_amt', 4: 'muni_capgain', 5:'net_int', 6: 'equity_cost', 7: 'equity_end_amt', 8: 'equity_cap_gain', 9:'net_div'}).set_index('Starting Year')\n return self.total_df\n #return (muni_bases, muni_ending, muni_cap_appr, muni_interest)", "def calc_internal_rate_of_return (self):\n self.irr = 0\n try:\n l = [-self.capital_costs] +\\\n self.annual_total_savings[:self.actual_project_life].tolist()\n self.irr = np.irr(l)\n except (AttributeError, ValueError, np.linalg.linalg.LinAlgError):\n pass", "def calculateATTStock():\n (InvestmentCalculator()) \\\n .set_stock_price(31.07) \\\n .set_shares(117) \\\n 
.set_payout_period(4) \\\n .set_investment_per_payout_period(3000) \\\n .set_years(3) \\\n .set_payout_ratio(0.51) \\\n .set_year_percentage_return(0.0) \\\n .should_reinvest_dividends(True) \\\n .compute()", "def calc_avg_consumption (self):\n # 500 average energy use, 12 months in a year. That's where the 6000.0\n # comes from.\n con_threshold = self.comp_specs['min kWh per household']\n yr = int(self.comp_specs['data']['Year'])\n #~ houses = int(self.comp_specs['data']['Total Occupied'])\n #~ r_con = self.forecast.base_res_consumption\n avg_con = float(self.comp_specs['data']['average kWh per house'])\n if not self.intertie_data is None:\n avg_con = self.intertie_data.get_item(\n 'Residential Energy Efficiency',\n 'data'\n )['average kWh per house']\n\n #~ self.avg_monthly_consumption = ave_con/12\n if (avg_con < con_threshold) or np.isnan(avg_con):\n avg_con = con_threshold\n self.diagnostics.add_note(self.component_name,\n (\"Average residential Electric consumption\"\n \" corrected to \"+ str(con_threshold)+\" kWh per year\"))\n self.avg_kWh_consumption_per_HH = avg_con\n self.diagnostics.add_note(self.component_name,\n \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\\n \" in \" + str(yr))", "def find_eta_projection(self):\r\n \r\n # Get temporal range in terms of years\r\n timedelta = self.year_E_fore_gov[self.elms_E_fore_gov] - self.year_E_fore_gov[self.elms_E_fore_gov][0]\r\n # Number of years over time\r\n num_years = len(timedelta)\r\n \r\n self.t_eta_fit = np.zeros(num_years)\r\n \r\n for yr in range(0,num_years):\r\n \r\n self.t_eta_fit[yr] = timedelta[yr].days/365.25\r\n \r\n \r\n popt, _ = curve_fit(model_expected_eta,self.t_eta_fit,self.eta_gdp_fore[self.elms_E_fore_gov],p0=(0.7,0.1,0.01))\r\n \r\n self.eta_0 = popt[0]\r\n self.eta_b = popt[1]\r\n self.xi = popt[2]\r\n self.eta = model_expected_eta(self.t,self.eta_0,self.eta_b,self.xi)\r\n \r\n self.E_noncovid = model_emissions(self.eta,self.Y_noncovid)\r\n \r\n return", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def annual_energy(self):\n return self['annual_energy']", "def basic_calculations() -> None:\n\n PropertyInfo.down_payment = \\\n WebScraper.price * UserValues.down_payment_percent\n PropertyInfo.loan = WebScraper.price - PropertyInfo.down_payment\n PropertyInfo.interest_rate_monthly = WebScraper.interest_rate / 12\n PropertyInfo.months = UserValues.years * 12\n PropertyInfo.property_taxes_monthly = WebScraper.property_taxes / 12" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate annual heating savings created by the project. Attributes
def calc_annual_heating_savings (self):
    price = self.diesel_prices + self.cd['heating fuel premium']
    maintenance = self.comp_specs['heat recovery o&m']
    self.annual_heating_savings = -1 * \
        (maintenance + (self.lost_heat_recovery * price))
[ "def calc_annual_heating_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_heating_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"", "def calc_annual_heating_savings (self):\n self.annual_heating_savings = self.baseline_HF_cost - \\\n self.proposed_HF_cost", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_electric_savings (self):\n self.annual_electric_savings = np.zeros(self.project_life)", "def calc_annual_electric_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_electric_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def get_total_savings_costs (self): # ex: eff(res) G59-V59\n try:\n return self.annual_total_savings\n except:\n return self.get_nan_range()", "def investment_calc(self, investment):\n \n \n #muni basis each year is the muni_amount + all prior year's interest\n self.muni_appreciated = round(investment[0] * (1+self.muni_roi),2)\n self.equity_appreciated = round(investment[1] * (1+self.equity_roi),2)\n \n #Interest and Dividends, pretax\n self.pretax_interest = round(investment[0]*self.muni_int,2)\n self.pretax_dividends = round(investment[1]*self.equity_div,2)\n \n muni_percent = self.pretax_interest/(self.pretax_interest+self.pretax_dividends)\n \n #Corp Tax Schedule\n if self.pretax_dividends+self.pretax_interest <= 50000:\n self.muni_int_earned = round(self.pretax_interest*(1-15/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-15/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 50000) & (self.pretax_dividends+self.pretax_interest <= 75000):\n #Will split the amount proportionally.\n self.muni_int_earned = round((self.pretax_interest-(7500*muni_percent))*(1-25/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(7500*(1-muni_percent)))*(1-25/100),2)\n elif 
(self.pretax_dividends+self.pretax_interest > 75000) & (self.pretax_dividends+self.pretax_interest <= 100000):\n self.muni_int_earned = round((self.pretax_interest-(13750*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(13750*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 100000) & (self.pretax_dividends+self.pretax_interest <= 335000):\n self.muni_int_earned = round((self.pretax_interest-(22250*muni_percent))*(1-39/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(22250*(1-muni_percent)))*(1-39/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 335000) & (self.pretax_dividends+self.pretax_interest <= 10000000):\n self.muni_int_earned = round((self.pretax_interest-(113900*muni_percent))*(1-34/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(113900*(1-muni_percent)))*(1-34/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 10000000) & (self.pretax_dividends+self.pretax_interest <= 15000000):\n self.muni_int_earned = round((self.pretax_interest-(3400000*muni_percent))*(1-35/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(3400000*(1-muni_percent)))*(1-35/100),2)\n elif (self.pretax_dividends+self.pretax_interest > 15000000) & (self.pretax_dividends+self.pretax_interest <= 18333333):\n self.muni_int_earned = round((self.pretax_interest-(5150000*muni_percent))*(1-38/100),2)\n self.equity_div_earned = round((self.pretax_dividends-(5150000*(1-muni_percent)))*(1-38/100),2)\n \n else:\n self.muni_int_earned = round(self.pretax_interest*(1-35/100),2)\n self.equity_div_earned = round(self.pretax_dividends*(1-35/100),2)\n return (self.muni_appreciated, self.muni_int_earned, self.equity_appreciated, self.equity_div_earned)", "def annual_water_make_up_profile(self):\n return self.cooling_water_flow * 0.008", "def GetAnnualAverages(WYDataDF):\n \n return( AnnualAverages )", "def calcAvg(self):\n\n avg = (self.project + self.midterm + self.final) / 3\n return avg", "def calc_savings_opportunities (self):\n rd = self.comp_specs['data']\n ## #HH\n self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"]\n self.opportunity_HH = np.float64( self.opportunity_HH )\n #~ print self.opportunity_HH\n if self.opportunity_HH < 0:\n self.opportunity_HH = 0\n self.diagnostics.add_note(self.component_name,\n \"calculate Houses to retrofit was negative, setting to 0\" )\n\n ## % as decimal\n #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"]\n #~ self.percent_savings = np.float64( self.percent_savings)\n area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])\n EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])\n avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. 
EUI Reduction\"])\n\n total = area * EUI\n\n\n # the one in each of these function calls is an identity\n amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0\n self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_HF)\n\n amnt = np.float64(rd[\"Wood\"]) / 100.0\n self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_cords)\n\n amnt = np.float64(rd[\"Utility Gas\"]) / 100.0\n self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_Mcf)\n\n amnt = np.float64(rd[\"LP\"]) / 100.0\n self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_gal_LP)\n\n amnt = np.float64(rd[\"Electricity\"]) / 100.0\n self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\\n self.calc_consumption_by_fuel(amnt, total, 1,\n constants.mmbtu_to_kWh)\n #~ self.savings_coal\n #~ self.savings_solar\n #~ self.savings_other\n\n self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\\n self.savings_wood * (1/constants.mmbtu_to_cords) +\\\n self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\\n self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\\n self.savings_LP* (1/constants.mmbtu_to_gal_LP)", "def total_returns(self, years_inv = 10):\n #first calculate what your returns are over the investment only period.\n reserve = []\n muni_bases = []\n muni_ending = []\n muni_cap_appr = []\n muni_interest = []\n equity_bases = []\n equity_ending = []\n equity_cap_appr = []\n equity_div = []\n inv_year = []\n for start_year in range(0, years_inv):\n if start_year == 0:\n reserve.append(round(self.reserve_fund*1.01,2))\n ending_muni, interest, ending_equity, dividends = self.investment_calc([self.muni_yr1,self.eq_yr1])\n #munis\n muni_bases.append(self.muni_yr1)\n muni_ending.append(ending_muni)\n muni_cap_appr.append(ending_muni - self.muni_yr1)\n muni_interest.append(interest)\n #equities\n equity_bases.append(self.eq_yr1)\n equity_ending.append(ending_equity)\n equity_cap_appr.append(ending_equity-self.eq_yr1)\n equity_div.append(dividends)\n inv_year.append(start_year)\n else:\n #compounding the ending amount from the prior year plus the interest earned in the last year\n reserve.append(round(reserve.copy()[-1]*1.01,2))\n ending_muni, interest, ending_equity, dividends = self.investment_calc([muni_ending[start_year-1]+muni_interest[start_year-1]+self.muni_amt,equity_ending[start_year-1]+ equity_div[start_year-1]+self.equity_amt])\n muni_bases.append(self.muni_yr1 + self.muni_amt*(start_year) + sum(muni_interest.copy()))\n muni_ending.append(ending_muni)\n muni_cap_appr.append(ending_muni - muni_bases.copy()[start_year])\n muni_interest.append(interest)\n #equities\n equity_bases.append(self.eq_yr1 + self.equity_amt*(start_year) + sum(equity_div.copy()))\n equity_ending.append(ending_equity)\n equity_cap_appr.append(ending_equity-equity_bases.copy()[start_year])\n equity_div.append(dividends)\n inv_year.append(start_year)\n \n \n #bases needs to show the bases for each year.\n #bases would be 260K in year 0, 260K + 260K + last year's int in year 1, \n self.total_df = pd.DataFrame([inv_year,reserve, muni_bases, muni_ending, muni_cap_appr, muni_interest, equity_bases, equity_ending, equity_cap_appr, equity_div]).T.rename(columns = {0: 'Starting Year', 1:'reserve', 2:'muni_cost', 3:'muni_end_amt', 4: 'muni_capgain', 
5:'net_int', 6: 'equity_cost', 7: 'equity_end_amt', 8: 'equity_cap_gain', 9:'net_div'}).set_index('Starting Year')\n return self.total_df\n #return (muni_bases, muni_ending, muni_cap_appr, muni_interest)", "def calculateSavings():\n args = request.json\n saving = float(args[\"saving\"])\n current = float(args[\"current\"])\n time = float(args[\"time\"]) * 12\n rate = float(args[\"rate\"])\n\n if rate == 0:\n top = saving - current\n return jsonify(top/time)\n\n top = saving - (current * ((1+rate)**time))\n bottom = ((1+rate)**time) - (1+rate)\n\n ans = top/(bottom/rate)\n ans /= 12\n return jsonify(ans)", "def computeInterest(self):\r\n interest = self.balance * SavingsAccount.RATE\r\n self.deposit(interest)\r\n return interest", "def calculateATTStock():\n (InvestmentCalculator()) \\\n .set_stock_price(31.07) \\\n .set_shares(117) \\\n .set_payout_period(4) \\\n .set_investment_per_payout_period(3000) \\\n .set_years(3) \\\n .set_payout_ratio(0.51) \\\n .set_year_percentage_return(0.0) \\\n .should_reinvest_dividends(True) \\\n .compute()", "def annual_fee(self, working_months, year, with_bpjs=True):\n monthly_bpjs = []\n\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n # initialize variable for storing the annual bpjs\n annual_c_old_age_insurance = 0\n annual_i_old_age_insurance = 0\n annual_c_pension_insurance = 0\n annual_i_pension_insurance = 0\n annual_c_health_insurance = 0\n annual_i_health_insurance = 0\n annual_death_insurance = 0\n annual_accident_insurance = 0\n\n if with_bpjs is True:\n # only calculate bpjs if is enabled and automatically set everthing to zero when is false\n start_month = 1\n for month in range(start_month, working_months+1):\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary, month, year)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary, month, year)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : 
death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n\n monthly_bpjs.append(monthly)\n\n annual_c_old_age_insurance = annual_c_old_age_insurance \\\n + company_old_age_insurance\n\n annual_i_old_age_insurance = annual_i_old_age_insurance \\\n + individual_old_age_insurance\n\n annual_c_pension_insurance = annual_c_pension_insurance \\\n + company_pension_insurance\n\n annual_i_pension_insurance = annual_i_pension_insurance \\\n + individual_pension_insurance\n\n annual_c_health_insurance = annual_c_health_insurance \\\n + company_health_insurance\n\n annual_i_health_insurance = annual_i_health_insurance \\\n + individual_health_insurance\n\n annual_death_insurance = annual_death_insurance\\\n + death_insurance\n\n annual_accident_insurance = annual_accident_insurance\\\n + accident_insurance\n #end for\n\n annual_bpjs = {\n \"old_age_insurance\" : {\n \"company\" : annual_c_old_age_insurance,\n \"individual\" : annual_i_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : annual_c_pension_insurance,\n \"individual\" : annual_i_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : annual_c_health_insurance,\n \"individual\" : annual_i_health_insurance,\n },\n \"death_insurance\" : annual_death_insurance,\n \"accident_insurance\" : annual_accident_insurance\n }\n return annual_bpjs", "def adding_average_of_physics_and_math():\n data_frame[AVERAGE_PHYSMATH] = (data_frame[PHYSICS] + data_frame[MATH]) / 2\n return data_frame" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get total fuel saved. Returns float the total fuel saved in gallons
def get_fuel_total_saved (self):
    #~ print self.lost_heat_recovery
    #~ print self.intertie_offset_generation_fuel_used
    #~ print self.pre_intertie_generation_fuel_used
    #~ gen_eff = self.cd["diesel generation efficiency"]
    #~ fuel_used = self.intertie_offset_generation / gen_eff
    generation_diesel_reduction = \
        np.array(self.pre_intertie_generation_fuel_used\
        [:self.actual_project_life])
    return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\
        generation_diesel_reduction
[ "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def total_saved_fuel(self) -> int:\n return float(self._state.attributes[SERVICE_ALL_TRIPS]['totalSavedFuel'])", "def get_fuel_total_saved (self):\n base_heat = \\\n self.baseline_HF_consumption[:self.actual_project_life]\n post_heat = \\\n self.proposed_HF_consumption[:self.actual_project_life]\n\n return (base_heat - post_heat) * constants.mmbtu_to_gal_HF", "def total_energy(self):\n return self.llg.effective_field.total_energy()", "def get_total_fitness(self):\n \n #Se regresa el valor concerniente al Fitness total de la Población.\n return self.__total_fitness", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def total_energy(self):\n return self._total_energy", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])", "def total_mass(self):\n return self.mass + self.fuel", "def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))", "def total_potential_energy(self) -> Number:\n return self._currentTotPot", "def total(self) -> float:\n return self._total", "def cargo_fuel(self):\n return self._cargo_fuel", "def get_tot(self):\n return self.tot", "def valor_total_fatura(self):\n return self._valor_total_fatura", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def read_total_energy(log_file):\n fh = open(log_file, \"r\")\n lines = fh.readlines()\n fh.close()\n \n enTot = None\n for l in lines:\n if \"SCF Done: E(\" in l:\n words = l.split()\n enTot = float(words[4])\n \n if enTot == None:\n raise FormatError(\"No SCF energy found in gaussian output file %s.\" % log_file)\n return enTot", "def joker_fuel(self) -> int:\n return self.bingo_fuel + 1000" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get your current running jobs on the Sherlock cluster
def running_jobs_sherlock():
    user = os.environ['USER']
    return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]
[ "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def show_jobs(self):\n load_environment()\n cluster_name = str(self.ui.cluster_names_box.currentText())\n if not cluster_name:\n raise NoClustersError()\n self.cluster = show_cluster(name=cluster_name)\n node = self.cluster.get_access_node()\n jobs = list(run_squeue(node).values())\n return jobs", "def get_jobs(self):\r\n\r\n # TODO: add jobs as well..\r\n return list(JOBS.keys())", "def _job_ls(args: dict, cluster: Cluster):\n jobs = cluster.jobs()\n\n if jobs is None:\n return\n\n logging.info('{} jobs found'.format(len(jobs)))\n for j in jobs:\n logging.info(j.metadata.name)\n\n return", "def _get_jobs():\n return _get_bigquery_service().jobs()", "def get_all_jobs():\n return base_jobs.JobMetaclass.get_all_jobs()", "def list_jobs(self) -> list:\n return self.conn.get_jobs()", "def jobs():\n app.logger.info('Retrieving jobs')\n return jsonify(get_hive().get_all_results(app.analysis))", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def get_job_list(self):\n return self.job_list", "def get_job_details():\n server = get_server_instance()\n for job_name, job_instance in server.get_jobs():\n print 'Job Name:%s' % job_instance.name\n print 'Job Description:%s' % (job_instance.get_description())\n print 'Is Job running:%s' % (job_instance.is_running())\n print 'Is Job enabled:%s' % (job_instance.is_enabled())", "def get_running_queues():\n running_queues = []\n\n if not is_running(\"celery worker\"):\n return running_queues\n\n procs = get_procs(\"celery\")\n for _, lcmd in procs:\n lcmd = list(filter(None, lcmd))\n cmdline = \" \".join(lcmd)\n if \"-Q\" in cmdline:\n running_queues.extend(lcmd[lcmd.index(\"-Q\") + 1].split(\",\"))\n\n running_queues = list(set(running_queues))\n\n return running_queues", "def list():\n\treturn _jobs.all()", "def get_all_jobs():\n jobs = []\n\n for app in get_apps():\n try:\n module = app.__name__[:-6] + 'jobs'\n jobs.extend(get_jobs_from_module(module))\n except ImportError:\n pass\n\n return jobs", "def get_workers(self):\n return self.workers", "def getWorkers(self):\n return self.workers", "async def list_jobs(self) -> Dict[str, JobInfo]:\n return await self._job_info_client.get_all_jobs()", "def current_job(self):\n assert(ExecutorThread.executor_object is not None)\n return self.__job" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
simply sends a message to the client address specified.
def send_net_message_client(message, client_addr):
    serverSocket.sendto(message, client_addr)
[ "def send_to_client(self, msg):\r\n msg += \"\\r\\n\"\r\n self.client.sendall(msg.encode('utf-8'))\r\n log_message = \"Sent to client at {0}: {1}\".format(self.address, msg)\r\n self.output_and_log(log_message)", "def send_message(self, name, message):\n if message == \"\":\n return\n clients = [client for client in self.clients if client.name == name]\n if not clients:\n raise Exception(\"Client not found\")\n for client in clients:\n client.send(chr(129))\n length = len(message)\n if length <= 125:\n client.send(chr(length))\n elif 126 <= length <= 65535:\n client.send(chr(126))\n client.send(struct.pack(\">H\", length))\n else:\n client.send(chr(127))\n client.send(struct.pack(\">Q\", length))\n client.send(message)", "def send(self, msg, client):\n\n if client in self.clients:\n\n print (\"Replying to {} with {}...\".format(client.peerstr, msg))\n client.sendMessage(json.dumps(msg)) # TODO: superize this so that\n # the client's sendMessage method\n # handles the json serializing", "def Send(data, address):\n reply = \"(Server) \" + data\n sendLock.acquire()\n sock.sendto(reply.encode('ascii'), address)\n sendLock.release()", "def sendToClient(self, client_id, message_type, message):\n if not client_id in self.client_to_socket:\n raise ValueError(\"The client with id {} does not exist\".format(client_id))\n self.sendToSocket(self.client_to_socket[client_id],message_type,message)", "def sendmsg(self, msg):\n self.client.send(msg.encode())", "def send(self, address, msg):\n payload = pickle.dumps(msg)\n self.socket.sendto(payload, address)", "async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)", "def send_stun(self, message, addr):\n logger.debug('%s > %s %s', self, addr, message)\n self.transport.sendto(bytes(message), addr)", "def send_message(self, user, message):\n buffer = \"{} {} {}{}\".format(protocol.SEND, user, message, protocol.MESSAGE_END)\n self.send(buffer)", "def send_message(current_client, stream):\n current_client.sendall(stream)", "def broadcast(self, msg):\n for client in self.addresses:\n client.send(bytes(msg, \"utf-8\"))", "def send_message(self, message):\n # Internet socket and TCP\n send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n message = \"<msg><{0}><{1}>{2}\".format(self.name, self.friend, message)\n\n try:\n send_socket.connect(self.remote)\n except ConnectionRefusedError:\n logging.warning(\"Could not connect to {0}\".format(self.remote))\n return\n\n logging.debug(\"Sending message {0} to {1}\".format(message, self.remote))\n send_socket.send(message.encode('latin1'))\n send_socket.close()", "def send(self, client, data):\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def client(message=None):\n host = get_settings('host', get_settings('address', 'localhost'))\n port = get_settings('port', 25252)\n alive = True\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(.1)\n\n try:\n sock.connect((host, port))\n except (ConnectionRefusedError, socket.timeout):\n alive = False\n\n if alive is True:\n if message is not None:\n try:\n sock.sendall(message.encode('utf8'))\n except (ConnectionRefusedError, socket.timeout):\n alive = False\n sock.close()\n return alive", "def send(self, message_body: str, target: str):\n\t\tif target == 
'local':\n\t\t\tself.client_process(message_body)\n\t\telse:\n\t\t\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n\t\t\t\ttry:\n\t\t\t\t\tsock.settimeout(1)\n\t\t\t\t\tsock.connect((target, self.channel_port))\n\t\t\t\t\tsock.send(message_body.encode())\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\tself.registry.delete_ip(target)", "def send(self, address: Address, packet: StrictPacket):\n with self._clientDictLock:\n self._clients[address].send(packet)", "def send_message(message):\n recipient_id = message.recipient.get_id()\n try:\n message.send()\n logging.info('Successfully sent message to {}:\\n{}'\n .format(recipient_id, message.content))\n except Exception:\n logging.error('Failed to send message to {}.'\n .format(recipient_id), exc_info=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the payee_wallet_id of this EscrowTransactionResponse.
def payee_wallet_id(self, payee_wallet_id):
    self._payee_wallet_id = payee_wallet_id
[ "def payer_wallet_id(self, payer_wallet_id):\n\n self._payer_wallet_id = payer_wallet_id", "def payeeid(self, payeeid):\n self._payeeid = payeeid", "def payer_id(self, payer_id):\n if payer_id is None:\n raise ValueError(\"Invalid value for `payer_id`, must not be `None`\")\n\n self._payer_id = payer_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def payeeid(self):\n return self._payeeid", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def wallet(self, wallet: Wallet):\n if wallet is None:\n raise ValueError(\"Invalid value for `wallet`, must not be `None`\") # noqa: E501\n\n self._wallet = wallet", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n self._transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def earnings_rate_id(self, earnings_rate_id):\n\n self._earnings_rate_id = earnings_rate_id", "def id_transacao_estorno(self, id_transacao_estorno):\n self._id_transacao_estorno = id_transacao_estorno", "def add(self, wallet: WalletType) -> None:\n if isinstance(wallet, tuple):\n wallet = Wallet.from_tuple(wallet)\n self._wallets[(wallet.exchange.id, wallet.instrument.symbol)] = wallet", "def get_wallet(self, walletId):\n return", "def trade_id(self, trade_id):\n\n self._trade_id = trade_id", "def payee(self, payee_id: str):\n return get_from_list(self.payees, \"id\", payee_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the payer_wallet_id of this EscrowTransactionResponse.
def payer_wallet_id(self, payer_wallet_id):
    self._payer_wallet_id = payer_wallet_id
[ "def payee_wallet_id(self, payee_wallet_id):\n\n self._payee_wallet_id = payee_wallet_id", "def payer_id(self, payer_id):\n if payer_id is None:\n raise ValueError(\"Invalid value for `payer_id`, must not be `None`\")\n\n self._payer_id = payer_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def payeeid(self, payeeid):\n self._payeeid = payeeid", "def payer_id(self):\n return self._payer_id", "def dealer_id(self, dealer_id):\n\n self._dealer_id = dealer_id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def wallet(self, wallet: Wallet):\n if wallet is None:\n raise ValueError(\"Invalid value for `wallet`, must not be `None`\") # noqa: E501\n\n self._wallet = wallet", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def response_id(self, response_id):\n\n self._response_id = response_id", "def advertiser_id(self, advertiser_id):\n\n self._advertiser_id = advertiser_id", "def transaction_id(self, transaction_id):\n self._transaction_id = transaction_id", "def pitcher_id(self, pitcher_id):\n\n self._pitcher_id = pitcher_id", "def set_retailer(self, retailer_id):\n self._retailer = retailer_id", "def payment_id(self, payment_id):\n\n self._payment_id = payment_id", "def get_wallet(self, walletId):\n return", "def signer_id(self, signer_id):\n\n self._signer_id = signer_id", "def seller(self, seller):\n\n self._seller = seller" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the withdrawn of this EscrowTransactionResponse.
def withdrawn(self, withdrawn):
    self._withdrawn = withdrawn
[ "def withdraw(self, **params):\n # force a name for the withdrawal if one not set\n if 'coin' in params and 'name' not in params:\n params['name'] = params['coin']\n return self._request_margin_api('post', 'capital/withdraw/apply', True, data=params)", "def withdraw(self, **params):\n # force a name for the withdrawal if one not set\n if 'asset' in params and 'name' not in params:\n params['name'] = params['asset']\n res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)\n if not res.get('success'):\n raise BinanceWithdrawException(res['msg'])\n return res", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def withdraw(self, amount, trigger_transaction, trans=None):\n\n #\n # validates the amount is positive\n self.validate_amount(amount)\n\n #\n # Validate the user has the amount for the withdraw\n if not self.check_sufficient_funds(amount):\n raise OverdraftException(self.user.username)\n\n #\n # creates the transaction\n category = TransactionType.objects.get(pk=TransactionTypeConstants.BonusCashWithdraw.value)\n\n #\n # makes the amount negative because it is a withdrawal\n self.create(category, -amount, trans)\n self.transaction_detail.trigger_transaction = trigger_transaction\n self.transaction_detail.save()\n\n Logger.log(ErrorCodes.INFO,\"Bonus Cash Withdraw\", self.user.username+\" withdrew \"+str(amount)+\" \"+self.accountName+\" from their account.\")", "def withdraw(self, amount):\n\n if self.balance < Decimal('0.00'):\n raise ValueError('You can not overdraft your account')\n\n # if withdraw is less than balance, subtract amount from balance\n self.balance -= amount", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def Withdrawn(self, default=[None]):\n return self.data.get('metadata', {}).get('withdrawn', default)", "def post_cancel_withdraw(self, withdraw_id: 'int') -> int:\n params = {\n \"withdraw-id\": withdraw_id\n }\n\n from huobi.service.wallet.post_cancel_withdraw import PostCancelWithdrawService\n return PostCancelWithdrawService(params).request(**self.__kwargs)", "def withdraw(self, amount: float) -> None:\n if self._has_enough_balance(amount):\n self._balance -= amount\n print(f'Withdrawal completed! Current balance: ${self._balance}')", "def withdraw(self, amount):\n self.balance -= amount\n \n if amount > self.balance:\n print(\"Insufficient funds\")\n self.balance -= 10\n\n else:\n print(f\"Amount Withdrawn: ${amount}\")", "def withdraw(self,amount):\n self.amount = amount\n self.balance -= self.amount\n\n if self.balance <= 0:\n print(f\"Balance: {self.balance}\")\n self.balance -= 10\n return \"Insufficient funds. 
Balance Charged $10.\\n\"\n else:\n print(f\"Amount Withdrawn: ${self.amount}.\")\n return \"\\n\"", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def withdraw(account_id: str, amount: float, atm: bool = False, date: str = None):\n try:\n transaction_id = banking_app.withdraw(UUID(account_id), amount, atm)\n transaction_id_string: str = style(str(transaction_id))\n typer.echo(f\"Withdrawal successful, transaction id is {transaction_id_string}\")\n except AccountError as e:\n typer.echo(style(str(e) + \"!!\", is_success=False))", "def handlePropagateWithdraw(self, event):\n\n log_msg('-----Handling Propagated Withdraw-------')\n self.balance -= event.money\n return {'interface': bankingsystem_pb2.propagate_withdraw, 'result': bankingsystem_pb2.success}", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,\n auto_commit: bool = True):\n row = (withdraw_id, tx_id, apply_time, asset, amount, fee)\n self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)", "def withdraw(self, amount: int) -> int:\n if self.balance - amount >= 0:\n self.balance = self.balance - amount\n self.transactions.append(f'Withdrew from Account: -{amount}')\n return self.balance\n pass", "def withdraw(self, amount):\n self.balance -= amount", "def withdraw(self, amount):\n self.__balance -= amount\n \n if self.__balance < 0:\n self.__balance -= 5\n self.__fees_paid += 5" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the escrow_address of this EscrowTransactionResponse.
def escrow_address(self, escrow_address): self._escrow_address = escrow_address
[ "def set_address(self, address):\n pass", "def set_address(self, a_address):\n self.set_parameter('address', a_address)\n return self", "def address(self, address):\n self._address = address", "def address(self, value):\n self.api_args['rdata']['address'] = value\n self._update_record(self.api_args)\n if self._implicitPublish:\n self._address = value", "def company_address(self, company_address):\n\n self._company_address = company_address", "def address(self, value):\n if 'rdata' not in self.api_args:\n self.api_args['rdata'] = {}\n self.api_args['rdata']['address'] = value\n self._update_record(self.api_args)\n if self._implicitPublish:\n self._address = value", "def set_return_address(self, name, organization, address, city, state, zipcode):\n self.return_address.name = name\n self.return_address.organization = organization\n self.return_address.address = address\n self.return_address.city = city\n self.return_address.state = state\n self.return_address.zipcode = zipcode", "def _set_address(self, v, load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def exchange_address(self, value):\n self.exchange_address_ = None if value is None else get_clean_address_or_throw(value)", "def set_contract_addr(self, addr):\n\t\tself.contract_addr = addr\n\t\tself._bank_inst = self._w3.eth.contract(\n\t\t\taddress=self.contract_addr,\n\t\t\tabi=self._iface[\"abi\"],\n\t\t)", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def _set_address(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"address must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def amended_address(self, amended_address):\n\n self._amended_address = amended_address", "def set_address(self,address): \n new_address = self._format_address(address)\n self.rs485.write_command('#00?8 {}'.format(new_address))\n self.rs485.clear_buffers()\n time.sleep(0.2)", "def setEthaddr(self):\n\t\tself.ethaddr = 
self.settings.getKeyValue('ethaddr')\n\t\tself.socket.send('setenv ethaddr ' + self.ethaddr+'\\r', 1)\n\t\treturn None", "def address(self):\n self._address = None", "def SetAddress(self, address):\n try:\n if address.startswith('0x'):\n self.address = int(address, 16)\n else:\n self.address = int(address)\n except ValueError:\n pass", "def trading_address_postcode(self, trading_address_postcode):\n\n self._trading_address_postcode = trading_address_postcode", "def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the record_status of this EscrowTransactionResponse.
def record_status(self, record_status): self._record_status = record_status
[ "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def line_status(self, line_status):\n\n self._line_status = line_status", "def account_status(self, account_status):\n\n self._account_status = account_status", "def card_status(self, card_status):\n\n self._card_status = card_status", "def ecr_set_status(self, response):\n assert len(response) == 6\n assert response[0:2] == b\"\\nS\"\n assert response[4:5] == b\"\\r\"\n self.ecr_status = response[2:4]", "def update_record_status(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/status\"\n record_data = {\n \"data\": [\n {\n \"ids\": [record.record_id],\n \"Candidate_Status\": record.status,\n \"comments\": record.comments\n }\n ],\n \"trigger\":[record.trigger]\n }\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def status(self, status):\n self._status = status", "def status(self, status):\n\n self._status = status", "def estatus(self, estatus: int):\n\n self._estatus = estatus", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def identity_status(self, identity_status):\n\n self._identity_status = identity_status", "def job_status(self, job_status):\n\n self._job_status = job_status", "def status(self, status):\n self.__status = status", "def set_status(self, status):\n self.status = status\n self.save()", "def risk_status(self, risk_status):\n self._risk_status = risk_status", "def record_type_enum(self, record_type_enum):\n\n self._record_type_enum = record_type_enum", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def set_status(self, status):\n self.response_dict(status=status)\n self.response.set_status(code=status)", "def region_status(self, region_status):\n self._region_status = region_status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the update_date of this EscrowTransactionResponse.
def update_date(self, update_date): self._update_date = update_date
[ "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def set_latest_update_date(self):\n metadata = self.info()\n metadata.updated_at = dt.datetime.now()\n self.commit()", "def updated_date_utc(self, updated_date_utc):\n\n self._updated_date_utc = updated_date_utc", "def update_date(self) -> str:\n return self._update_date", "def update_at(self, update_at):\n self._update_at = update_at", "def updated_date(self):\n return self._updated_date", "def etadate(self, etadate):\n\n self._etadate = etadate", "def set_date(self, date):\n self.date = date\n return", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at):\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def update_time(self, update_time):\n\n self._update_time = update_time", "def lastupdatedate(self, lastupdatedate):\n\n self._lastupdatedate = lastupdatedate", "def date(self, date):\n\n self._date = date", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def date(self, date):\n \n self._date = date", "def _fix_date(self):\n self._response.date = datetime.utcnow()", "def set_exchange_rate_date(self, exchange_rate_date):\n self.set_value_into_input_field(self.exchange_rate_date_locator, exchange_rate_date)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to log the failed SQS records metric
def _log_failed(cls, count): MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)
[ "def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=1)\n self.assertEqual(sms.logs.count(), 1)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=2)\n self.assertEqual(sms.logs.count(), 1)", "def append_record_failure():\n\t\tpass", "def test_firehose_record_delivery_failure(self, mock_logging):\n class MockFirehoseClient(object):\n @staticmethod\n def put_record_batch(**kwargs):\n return {\n 'FailedPutCount': len(kwargs.get('Records')),\n 'RequestResponses': [\n {\n 'RecordId': '12345',\n 'ErrorCode': '300',\n 'ErrorMessage': 'Bad message!!!'\n },\n ]\n }\n\n self.__sa_handler.firehose_client = MockFirehoseClient()\n\n test_event = convert_events_to_kinesis([\n # unit_test_simple_log\n {\n 'unit_key_01': 1,\n 'unit_key_02': 'test'\n },\n {\n 'unit_key_01': 2,\n 'unit_key_02': 'test'\n },\n # test_log_type_json_nested\n {\n 'date': 'January 01, 3005',\n 'unixtime': '32661446400',\n 'host': 'my-host.name.website.com',\n 'data': {\n 'super': 'secret'\n }\n }\n ])\n\n self.__sa_handler.run(test_event)\n assert_true(mock_logging.error.called)", "def test_failed_deliveries_logging(self):\n email = Email.objects.create(from_email='from@example.com',\n to=['to@example.com'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=0)\n self.assertEqual(email.logs.count(), 0)\n\n email = Email.objects.create(from_email='from@example.com',\n to=['to@example.com'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=1)\n self.assertEqual(email.logs.count(), 1)\n\n email = Email.objects.create(from_email='from@example.com',\n to=['to@example.com'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=2)\n self.assertEqual(email.logs.count(), 1)", "def _process_failed_payloads(self, response):\n i = 0\n failed_records = []\n for r in response[\"Records\"]:\n logger.debug(\"Response: {}\".format(r))\n if \"ErrorCode\" in r:\n logger.warning(\"Payload failed to be sent to Kinesis. 
Message content: {}\".format(r))\n failed_records.append(i)\n i += 1\n successful_message_count = len(self.batch_in_progress) - len(failed_records)\n if successful_message_count:\n logger.info(\"Sent messages to kinesis {}\".format(successful_message_count))\n if failed_records:\n logger.debug(\n \"Failed Records: {}\".format(response[\"FailedRecordCount\"]))\n batch_of_problematic_records = [self.batch_in_progress[i] for i in failed_records]\n if len(failed_records) <= 2:\n for payload in batch_of_problematic_records:\n self._send_individual_payload(payload)\n else:\n self._batch_send_payloads(batch_of_problematic_records)\n self.batch_in_progress = None", "def identify_result_error(self, record):\n return [\"error\"]", "def test_unique_buckets_invalid_record(self, mock_logging):\n self.client.received_messages = [{'Body': '{\"missing-key\": 1}'}]\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def LogDbError():\n pass", "def record_error(self, msg):\n args = {\n 'userName': self.user,\n 'command': self.command + NTR('-failed'),\n 'repo': self.repo_name\n }\n p4gf_log.record_error(msg, args, self._environ)", "def test_unique_buckets_invalid_sqs(self, mock_logging):\n self.client.received_messages = ['wrong-format-test']\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def test_firehose_record_delivery_failed_put_count(self, mock_logging):\n self.__sa_handler.firehose_client = boto3.client(\n 'firehose', region_name='us-east-1')\n\n test_event = convert_events_to_kinesis([\n # unit_test_simple_log\n {\n 'unit_key_01': 1,\n 'unit_key_02': 'test'\n },\n {\n 'unit_key_01': 2,\n 'unit_key_02': 'test'\n },\n # test_log_type_json_nested\n {\n 'date': 'January 01, 3005',\n 'unixtime': '32661446400',\n 'host': 'my-host.name.website.com',\n 'data': {\n 'super': 'secret'\n }\n }\n ])\n\n delivery_stream_names = ['streamalert_data_test_log_type_json_nested',\n 'streamalert_data_unit_test_simple_log']\n\n # Setup mock delivery streams\n for delivery_stream in delivery_stream_names:\n self.__sa_handler.firehose_client.create_delivery_stream(\n DeliveryStreamName=delivery_stream,\n S3DestinationConfiguration={\n 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',\n 'BucketARN': 'arn:aws:s3:::kinesis-test',\n 'Prefix': '{}/'.format(delivery_stream),\n 'BufferingHints': {\n 'SizeInMBs': 123,\n 'IntervalInSeconds': 124\n },\n 'CompressionFormat': 'Snappy',\n }\n )\n\n with patch.object(self.__sa_handler.firehose_client, 'put_record_batch') as firehose_mock:\n firehose_mock.side_effect = [\n {\n 'FailedPutCount': 3,\n 'RequestResponses': [\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n },\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n },\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n }\n ]\n },\n {\n 'FailedPutCount': 3,\n 'RequestResponses': [\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n },\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n },\n {\n \"ErrorCode\": \"ServiceUnavailableException\",\n \"ErrorMessage\": \"Slow down.\"\n }\n ]\n },\n {\n 'FailedPutCount': 0,\n 'RequestResponses': [\n {\n \"RecordId\": \"12345678910\",\n \"ErrorCode\": \"None\",\n \"ErrorMessage\": \"None\"\n },\n {\n 
\"RecordId\": \"12345678910\",\n \"ErrorCode\": \"None\",\n \"ErrorMessage\": \"None\"\n },\n {\n \"RecordId\": \"12345678910\",\n \"ErrorCode\": \"None\",\n \"ErrorMessage\": \"None\"\n }\n ]\n }]\n self.__sa_handler.run(test_event)\n\n firehose_mock.assert_called()\n assert_true(mock_logging.info.called)", "def logerr(msg, job):\n log(msg, job, err=True)", "def _log_failures(self, results):\n table_title = \"Failed Apex Tests\"\n table_data = self._get_table_data(results)\n table = CliTable(\n table_data,\n table_title,\n )\n table.echo()", "def test_firehose_record_delivery_client_error(self, mock_logging):\n self.__sa_handler.firehose_client = boto3.client(\n 'firehose', region_name='us-east-1')\n\n test_events = [\n # unit_test_simple_log\n {'unit_key_01': 2, 'unit_key_02': 'testtest'}\n for _\n in range(10)]\n\n self.__sa_handler._firehose_request_helper('invalid_stream',\n test_events)\n\n missing_stream_message = 'Client Error ... An error occurred ' \\\n '(ResourceNotFoundException) when calling the PutRecordBatch ' \\\n 'operation: Stream invalid_stream under account 123456789012 not found.'\n assert_true(mock_logging.error.called_with(missing_stream_message))", "def _process_cache_error(self, msg):\n # type: (str) -> None\n self._log_exception(msg)\n self._increment_metric(\"redis_error\")", "def __errback(self, exc, interval):\n self.log.warn(\"Unable to publish message: {}. Retry in {}s.\".format(exc, interval))", "def log_rate_limit_error(error_to_log):\n logging.warning('API Rate limit exceeded %s',\n error_to_log)", "def log_batch(self, run_id, metrics, params, tags):", "async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Segment the records into batches that conform to SQS restrictions. This will log any single record that is too large to send, and skip it.
def _message_batches(cls, records): # Dump the records to a list of minimal json records_json = [ json.dumps(record, separators=(',', ':')) for record in records ] current_batch_size = 0 current_batch = [] for record in records_json: line_len = len(record) # Check if the max size of the batch has been reached or if the current # record will exceed the max batch size and start a new batch if ((len(current_batch) == cls.MAX_BATCH_COUNT) or (current_batch_size + line_len > cls.MAX_BATCH_SIZE)): yield current_batch[:] current_batch_size = 0 del current_batch[:] if line_len > cls.MAX_BATCH_SIZE: LOGGER.error('Record too large (%d) to send to SQS:\n%s', line_len, record) cls._log_failed(1) continue # Add the record to the batch current_batch_size += line_len current_batch.append(record) # yield the result of the last batch (no need to copy via slicing) if current_batch: yield current_batch
[ "def test_firehose_segment_records_by_size(self):\n\n record_batch = [\n # unit_test_simple_log\n {'unit_key_01': 2, 'unit_key_02': 'testtest' * 10000}\n for _\n in range(100)]\n\n sized_batches = []\n\n for sized_batch in self.__sa_handler._segment_records_by_size(record_batch):\n sized_batches.append(sized_batch)\n\n assert_true(len(str(sized_batches[0])) < 4000000)\n assert_equal(len(sized_batches), 4)\n assert_true(isinstance(sized_batches[3][0], dict))", "def test_record_batches_max_batch_count(self):\n records = self._sample_raw_records(count=501)\n\n result = list(FirehoseClient._record_batches(records, 'test_function_name'))\n assert_equal(len(result), 2)\n assert_equal(len(result[0]), 500)\n assert_equal(len(result[1]), 1)", "def _send_batch(self):\n LOGGER.info('Sending SQS batch of %d keys: %s ... %s',\n sum(msg.num_keys for msg in self._messages), self._first_key, self._last_key)\n response = SQS_CLIENT.send_message_batch(\n QueueUrl=self._queue_url,\n Entries=[msg.sqs_entry() for msg in self._messages if msg.num_keys > 0]\n )\n\n failures = response.get('Failed', [])\n if failures:\n for failure in failures:\n LOGGER.error('Unable to enqueue S3 key %s: %s',\n self._messages[int(failure['Id'])], failure['Message'])\n boto3.client('cloudwatch').put_metric_data(Namespace='BinaryAlert', MetricData=[{\n 'MetricName': 'BatchEnqueueFailures',\n 'Value': len(failures),\n 'Unit': 'Count'\n }])\n\n for msg in self._messages:\n msg.reset()\n self._first_key = None", "def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._payload_size += rq_size\n if len(self._batch) >= self.max_entry_number:\n self._send_batch()", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def test_record_batches_rec_too_large(self, failure_mock):\n records = [\n {'key': 'test' * 1000 * 1000}\n ]\n\n result = list(FirehoseClient._record_batches(records, 'test_function_name'))\n assert_equal(result, [])\n failure_mock.assert_called_with(1, 'test_function_name')", "def test_record_batches_max_batch_size(self):\n records = [\n {'key_{}'.format(i): 'test' * 100000}\n for i in range(10)\n ]\n result = list(FirehoseClient._record_batches(records, 'test_function_name'))\n assert_equal(len(result), 2)\n assert_equal(len(result[0]), 9)\n assert_equal(len(result[1]), 1)\n batch_size_01 = sum(len(rec) for rec in result[0])\n batch_size_02 = sum(len(rec) for rec in result[1])\n assert_equal(batch_size_01 < FirehoseClient.MAX_BATCH_SIZE, True)\n assert_equal(batch_size_02 < FirehoseClient.MAX_BATCH_SIZE, True)\n assert_equal(batch_size_01 + batch_size_02 > FirehoseClient.MAX_BATCH_SIZE, True)", "def test_firehose_limit_record_size(self, mock_logging):\n test_events = [\n # unit_test_simple_log\n {\n 'unit_key_01': 1,\n 'unit_key_02': 'test' * 250001 # is 4 bytes higher than max\n },\n {\n 'unit_key_01': 2,\n 'unit_key_02': 'test'\n },\n # test_log_type_json_nested\n {\n 'date': 'January 01, 3005',\n 'unixtime': '32661446400',\n 'host': 'my-host.name.website.com',\n 'data': {\n 'super': 'secret'\n }\n }\n ]\n\n self.__sa_handler._limit_record_size(test_events)\n\n 
assert_true(len(test_events), 2)\n assert_true(mock_logging.error.called)", "def _process_failed_payloads(self, response):\n i = 0\n failed_records = []\n for r in response[\"Records\"]:\n logger.debug(\"Response: {}\".format(r))\n if \"ErrorCode\" in r:\n logger.warning(\"Payload failed to be sent to Kinesis. Message content: {}\".format(r))\n failed_records.append(i)\n i += 1\n successful_message_count = len(self.batch_in_progress) - len(failed_records)\n if successful_message_count:\n logger.info(\"Sent messages to kinesis {}\".format(successful_message_count))\n if failed_records:\n logger.debug(\n \"Failed Records: {}\".format(response[\"FailedRecordCount\"]))\n batch_of_problematic_records = [self.batch_in_progress[i] for i in failed_records]\n if len(failed_records) <= 2:\n for payload in batch_of_problematic_records:\n self._send_individual_payload(payload)\n else:\n self._batch_send_payloads(batch_of_problematic_records)\n self.batch_in_progress = None", "def _CutBatch(self):\r\n if self._buffer is not None:\r\n batch = None\r\n try:\r\n log_buf = self._buffer.getvalue()\r\n if type(log_buf) is unicode:\r\n import tornado.escape\r\n log_buf = tornado.escape.utf8(log_buf)\r\n batch = self.MakeBatch(log_buf)\r\n except:\r\n logging.exception('Failure to generate log batch!')\r\n pass\r\n\r\n if batch:\r\n try:\r\n self._persistor.PersistLogBatch(batch)\r\n except:\r\n logging.exception('Failure to persist log batch!')\r\n pass\r\n\r\n self._buffer.close()\r\n self._buffer = None\r\n self._inner_handler = None", "def _maybe_slice_large_record_batch(\n record_batch: pa.RecordBatch,\n) -> Iterable[pa.RecordBatch]:\n if record_batch.nbytes > _MAX_TRANSFORMED_BATCH_BYTES_SIZE:\n if record_batch.num_rows < 2:\n logging.warning(\n 'Transformed data row may be too large: %d bytes. 
'\n 'Consider reshaping outputs to distribute elements over a larger '\n 'number of rows to allow automatic slicing.',\n record_batch.nbytes,\n )\n yield record_batch\n return\n # Note that slicing is a zero-copy operation, so the produced batches will\n # still share memory with the original one up to the materialization\n # boundary.\n mid_point = record_batch.num_rows // 2\n yield from _maybe_slice_large_record_batch(\n record_batch.slice(offset=0, length=mid_point)\n )\n yield from _maybe_slice_large_record_batch(\n record_batch.slice(offset=mid_point)\n )\n else:\n yield record_batch", "def create_messages_from_df(df):\n max_size = 10\n messages = []\n batch_messages = []\n for index, series in df.iterrows():\n mod_index = index%max_size #Max of 10 messages can be sent in a single batch process.\n message = {\n 'Id': str(mod_index),\n 'MessageBody': series['path'],\n 'MessageAttributes': {\n 'Classification': {\n 'StringValue': str(series['classification']),\n 'DataType': 'Number'\n },\n 'SQSQueue': {\n 'StringValue': str(series['sqs_queue']),\n 'DataType': 'String'\n },\n 'Algorithm': {\n 'StringValue': str(series['of_algorithm']),\n 'DataType': 'String'\n },\n }\n }\n messages.append(message)\n if len(messages) == max_size :\n print(\"len messages = max size\")\n batch_messages.append(messages)\n messages=[]\n\n if len(messages) != max_size and len(messages) > 0:\n batch_messages.append(messages)\n\n\n return batch_messages", "def _post_large_bulk_insert(self, offset):\n\n message1 = {\"body\": '', \"ttl\": 300}\n message2 = {\"body\": '', \"ttl\": 120}\n\n doc = {'messages': [message1, message2]}\n overhead = len(jsonutils.dumps(doc))\n\n half_size = (self.limits.max_messages_post_size - overhead) // 2\n message1['body'] = helpers.generate_random_string(half_size)\n message2['body'] = helpers.generate_random_string(half_size + offset)\n\n return self.client.post(data=doc)", "def check_batch_limit(psid_list):\n psid_count = len(psid_list.split(', '))\n psid_limit = settings.common.FB_BATCH_REQUEST['limit']\n if psid_count > int(psid_limit):\n raise Exception(\n \"Facebook batch request limit exceeded. 
Allowed {0}, received {1}\".format(psid_limit, psid_count)\n )", "def test_message_bulk_insert_oversized(self):\n\n doc = '[{{\"body\": \"{0}\", \"ttl\": 300}}, {{\"body\": \"{1}\", \"ttl\": 120}}]'\n overhead = len(doc.format('', ''))\n\n half_size = (self.limits.max_messages_post_size - overhead) // 2\n doc = doc.format(helpers.generate_random_string(half_size),\n helpers.generate_random_string(half_size + 1))\n\n result = self.client.post(data=doc)\n self.assertEqual(400, result.status_code)", "def old_ensure_batch_is_sufficiently_small(self, batch_instances: Iterable[Instance]) -> List[List[Instance]]:\n if self._maximum_samples_per_batch is None:\n return [list(batch_instances)]\n\n # check if we need to break into smaller chunks\n key, limit = self._maximum_samples_per_batch\n padding_length = -1\n list_batch_instances = list(batch_instances)\n for instance in list_batch_instances:\n if self.vocab is not None:\n # we index here to ensure that shape information is available,\n # as in some cases (with self._maximum_samples_per_batch)\n # we need access to shaping information before batches are constructed)\n instance.index_fields(self.vocab)\n field_lengths = instance.get_padding_lengths()\n for _, lengths in field_lengths.items():\n try:\n padding_length = max(padding_length,\n lengths[key])\n except KeyError:\n pass\n\n if padding_length * len(list_batch_instances) > limit:\n # need to shrink\n num_samples = padding_length * len(list_batch_instances)\n num_shrunk_batches = math.ceil(num_samples / float(limit))\n shrunk_batch_size = math.ceil(len(list_batch_instances) / num_shrunk_batches)\n shrunk_batches = []\n start = 0\n while start < len(list_batch_instances):\n end = start + shrunk_batch_size\n shrunk_batches.append(list_batch_instances[start:end])\n start = end\n return shrunk_batches\n else:\n return [list_batch_instances]", "def _flush_batch(self) -> None:\n batch_len = len(self._current_batch)\n if batch_len == 0:\n self.logger.debug('Nothing to flush.')\n return\n\n self.logger.debug(f'Flushing batch size {batch_len}')\n\n with self.LOCK:\n to_process_batch = list(self._current_batch)\n self._current_batch = list()\n\n log_event = EventFactory.create_log_event(to_process_batch, self.logger)\n\n self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event)\n\n if log_event is None:\n self.logger.exception('Error dispatching event: Cannot dispatch None event.')\n return\n\n try:\n self.event_dispatcher.dispatch_event(log_event)\n except Exception as e:\n self.logger.error(f'Error dispatching event: {log_event} {e}')", "def _bulk_insert(self):\n self.stdout.write(\"Inserting\")\n\n batch_size = 10000\n objs = tuple(self.bulks)\n total = len(objs)\n pbar = tqdm(total=total)\n\n while True:\n batch = list(islice(objs, batch_size))\n if not batch:\n break\n Logs.objects.bulk_create(batch, batch_size, ignore_conflicts=True)\n pbar.update(len(batch))", "def chunks(self):\n lines = self.lines()\n while True:\n msg = \"\"\n if not lines:\n break\n while lines and len(msg + lines[0]) < self.limit:\n msg += lines.pop(0) + \"\\n\"\n yield msg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inspect the response and remove any records that have been successfully sent. For each record, the index of the response element is the same as the index used in the request array.
def _strip_successful_records(cls, messages, response): success_ids = { item['Id'] for item in response['Successful'] } LOGGER.info('Removing sucessful message indices from batch: %s', success_ids) for success_id in success_ids: # Get the successful message by ID and remove it message = cls._extract_message_by_id(messages, success_id) if not message: continue messages.remove(message)
[ "def test_strip_successful_records(self):\n batch = [{'test': 'success'}, {'other': 'failure'}, {'other': 'info'}]\n response = {\n 'FailedPutCount': 1,\n 'RequestResponses': [\n {'RecordId': 'rec_id_01'},\n {'ErrorCode': 10, 'ErrorMessage': 'foo'},\n {'RecordId': 'rec_id_03'}\n ]\n }\n\n expected_batch = [{'other': 'failure'}]\n FirehoseClient._strip_successful_records(batch, response)\n\n assert_equal(batch, expected_batch)", "def clean_none_response(self):\n\n print(\"# Rows before non response are removed: {} \".format(len(self.data)))\n self.data = self.data[self.data['names'].map(lambda d: len(d) > 0)]\n print(\"# Rows after non response are removed: {} \".format(len(self.data)))", "def _process_failed_payloads(self, response):\n i = 0\n failed_records = []\n for r in response[\"Records\"]:\n logger.debug(\"Response: {}\".format(r))\n if \"ErrorCode\" in r:\n logger.warning(\"Payload failed to be sent to Kinesis. Message content: {}\".format(r))\n failed_records.append(i)\n i += 1\n successful_message_count = len(self.batch_in_progress) - len(failed_records)\n if successful_message_count:\n logger.info(\"Sent messages to kinesis {}\".format(successful_message_count))\n if failed_records:\n logger.debug(\n \"Failed Records: {}\".format(response[\"FailedRecordCount\"]))\n batch_of_problematic_records = [self.batch_in_progress[i] for i in failed_records]\n if len(failed_records) <= 2:\n for payload in batch_of_problematic_records:\n self._send_individual_payload(payload)\n else:\n self._batch_send_payloads(batch_of_problematic_records)\n self.batch_in_progress = None", "def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)", "def remove_record_failure():\n\t\tpass", "def sanitize_reply_buffer(self): \n for i in self.async_reply_buffer:\n\n if not i.endswith('\\n'):\n \n i = self.async_reply_buffer.index(i)\n temp = self.async_reply_buffer\n #with suppress(IndexError):\n if i+1 == len(temp):\n return 'SANFAIL'\n if i < 
len(temp):\n #print(i)\n #print(len(temp))\n #print(temp)\n #print(temp[i])\n #print(temp[i+1])\n temp[i] = temp[i] + temp[i+1]\n temp.pop(i+1)\n self.async_reply_buffer = temp\n\n\n #print(self.async_reply_buffer)", "def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())", "def removeResponseFromList(self, response: IlluminationResponse) -> None:\n matchingRows = self.listResponses.findItems(str(response), Qt.MatchExactly)\n\n for matchingRow in matchingRows:\n self.listResponses.takeItem(self.listResponses.row(matchingRow))\n\n self.deselectSelectedRow()", "def __send_responses(self):\n # create a copy of the responses\n responses = self.__responses\n # for every response\n for response in responses:\n # send the response\n self.__send(response)\n # remove the response from the responses' list\n if response in self.__responses:\n self.__responses.remove(response)", "def segmentResponse(self):\n self.segResponses = []\n for response in self.responses:\n keywordResponse = [keyword for keyword in self.wordSegmentation(response)\n if keyword not in self.stopwords\n and keyword != ' ']\n self.totalWords += len(keywordResponse)\n self.segResponses.append(keywordResponse)\n #logging.info(\"已完成回應斷詞\")", "def handle_data_response(response):\n records_data = []\n for message in response:\n data = message.data\n if not empty_data_chunk(data):\n records_data.append(from_record_data(data))\n\n return pd.concat(records_data)", "def clearResponseList(self) -> None:\n self.listResponses.clear()", "def _recordsToResponse(self, records):\n fieldsList = []\n count = 0\n if records:\n size = 0\n while size < self._maxSize:\n try:\n record = records.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n pickled = pickle.dumps(self.recordToDict(record))\n size = size + len(pickled)\n fieldsList.append(pickled)\n count += 1\n\n response = {\"items\": fieldsList}\n\n if records:\n response[\"continuation\"] = self._storeContinuation(records, \"records\")\n\n return response", "def clear_ret_messages(self):\r\n for edge_id in self.messages:\r\n if \"ret_test\" in self.messages[edge_id]:\r\n self.messages[edge_id].pop(\"ret_test\")", "def remove_response(self, uuid):\n if uuid in self._response:\n del self._response[uuid]", "def removed_requests(self):\n current_ids = {request.id for request in self.all_requests}\n unknown_ids = self.state.known_requests.keys() - current_ids\n schema = self._schema()\n removed_requests = []\n for req_id in sorted(unknown_ids):\n request = schema.Request()\n request.id = req_id\n removed_requests.append(request)\n return removed_requests", "def remove_response(self, uuid):\n if not uuid:\n return\n\n if uuid in self.response:\n del self.response[uuid]", "def _on_tracking_failure(self, response, data):\n try:\n response = json.loads(response)\n except:\n # the response should be in JSON, but in case it can't be parsed just try 
another attempt\n logging.debug(\"cannot parse tracker response, should be valid JSON\")\n return response\n\n # remove the successfully tracked hits from payload\n tracked = response['tracked']\n data['requests'] = data['requests'][tracked:]\n\n return response['message']", "def response( self, request, error_code, data ):\n array = []\n if request == b'CAUTH' and data != self.__null_byte:\n # process differently\n data_array = self.ds_document.break_data(data)\n # print('after data is broken: {}'.format(data_array))\n for item in data_array: # for all the items we have to generate a different timestamp and checkum\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, item)\n array.append([request, checksum, timestamp, error_code, item])\n # print(array)\n # print(array)\n return array\n\n else: # if we are sending a generic response, then\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, data)\n\n array = [request, checksum, timestamp, error_code, data]\n return array" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a list of records to SQS, batching as necessary
def send(self, payloads): records = self._payload_messages(payloads) # SQS only supports up to 10 messages so do the send in batches for message_batch in self._message_batches(records): response = self._send_messages(message_batch) self._finalize(response, message_batch)
[ "def _send_batch(self):\n LOGGER.info('Sending SQS batch of %d keys: %s ... %s',\n sum(msg.num_keys for msg in self._messages), self._first_key, self._last_key)\n response = SQS_CLIENT.send_message_batch(\n QueueUrl=self._queue_url,\n Entries=[msg.sqs_entry() for msg in self._messages if msg.num_keys > 0]\n )\n\n failures = response.get('Failed', [])\n if failures:\n for failure in failures:\n LOGGER.error('Unable to enqueue S3 key %s: %s',\n self._messages[int(failure['Id'])], failure['Message'])\n boto3.client('cloudwatch').put_metric_data(Namespace='BinaryAlert', MetricData=[{\n 'MetricName': 'BatchEnqueueFailures',\n 'Value': len(failures),\n 'Unit': 'Count'\n }])\n\n for msg in self._messages:\n msg.reset()\n self._first_key = None", "def send_record_batch(kinesis_client, stream_name, raw_records):\n\n # Translate input records into the format needed by the boto3 SDK\n formatted_records = []\n for rec in raw_records:\n formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)})\n kinesis_client.put_records(StreamName=stream_name, Records=formatted_records)\n print('Sent %d records to stream %s.' % (len(formatted_records), stream_name))", "def submit_batch(self, batch: list):\n try:\n logging.info(f\"Pushing tweet data of size {len(batch)} to Kinesis\")\n response = self.firehose_client.put_record_batch(DeliveryStreamName=self.delivery_stream,\n Records=[{\n \"Data\": json.dumps(batch)\n }])\n logging.debug(response)\n\n except ClientError as ex:\n logging.exception(f\"Could not push tweet batch to Kinesis: {ex}\")\n\n finally:\n self.counter += len(batch)\n self._batch = []", "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch", "def put_record_batch(DeliveryStreamName=None, Records=None):\n pass", "def send_messages_to_ks(records: List[str], stream_name: str):\n log.info('Sending message to Kinesis Stream')\n client = boto3.client('kinesis')\n return client.put_records(\n Records=[\n {\n 'Data': record + '\\n',\n 'PartitionKey': '1'\n } for record in records],\n StreamName=stream_name\n )", "def test_send_batch(self):\n pass", "def batch_push(self, payloads):\n body = json.dumps(payloads)\n\n status, response = self._request('POST', body, BATCH_PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)", "def _send_batch(self, service_checks: list):\n for service_check in service_checks:\n self._send(service_check)", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = 
http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def _dispatch_batches(self, base_url, endpoint, item_list, prep_args, dataset_id=None, dataset_version=None):\n pool = ThreadPool(processes=self.pool_size)\n batch = []\n\n # Decide which _prep function to use based on the endpoint\n if endpoint == 'import' or endpoint == 'import-events':\n prep_function = Mixpanel._prep_event_for_import\n elif endpoint == 'engage' or endpoint == 'import-people':\n prep_function = Mixpanel._prep_params_for_profile\n else:\n Mixpanel.LOGGER.warning(\n 'endpoint must be \"import\", \"engage\", \"import-events\" or \"import-people\", found: ' + str(endpoint))\n return\n\n if base_url == self.BETA_IMPORT_API:\n batch_size = 1000\n else:\n batch_size = 50\n\n for item in item_list:\n if prep_args is not None:\n # Insert the given item as the first argument to be passed to the _prep function determined above\n prep_args[0] = item\n params = prep_function(*prep_args)\n if params:\n batch.append(params)\n else:\n batch.append(item)\n\n if len(batch) == batch_size:\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n batch = []\n\n # If there are fewer than batch_size updates left ensure one last call is made\n if len(batch):\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n pool.close()\n pool.join()", "def batch_process(self, message_list, action, userId='me'):\n\n list_of_ids = []\n\n for key, value in message_list.items():\n list_of_ids.append(value)\n\n chunks = [list_of_ids[x:x+1000] for x in range(0, len(list_of_ids), 1000)]\n\n for page in range(0, len(chunks)):\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'batchModify')\n body = { \n \"ids\": chunks[page],\n \"removeLabelIds\": [\"INBOX\"],\n }\n else:\n resource = getattr(self.connection.users().messages(), 'batchDelete')\n body = { \n \"ids\": chunks[page],\n }\n\n dynamic_request = resource(userId=userId, body=body)\n response = dynamic_request.execute()\n print(f'[√] Bulk Action: SUCCESS {len(chunks[page])} Messages have been {action}d! 
- {page}')\n print(f'[√] Bulk Action: SUCCESS Total Number of Processed Messages: {len(list_of_ids)}')\n return True", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def test_send_bulk(self):\n email = Email.objects.create(\n to=['to@example.com'], from_email='bob@example.com',\n subject='send bulk', message='Message', status=STATUS.queued,\n backend_alias='locmem')\n _send_bulk([email], uses_multiprocessing=False)\n self.assertEqual(Email.objects.get(id=email.id).status, STATUS.sent)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, 'send bulk')", "def process_batch(self, db, batch_id, event_list):\r\n for ev in event_list:\r\n self.process_event(db, ev)", "def apns_send_bulk_message(registration_ids, data, **kwargs):\n\tsocket = _apns_create_socket(APNS_SOCKET)\n\tfor registration_id in registration_ids:\n\t\t_apns_send(registration_id, data, socket=socket, **kwargs)\n\n\tsocket.close()", "def batch_write(self, table_name, items):\n dynamodb = self.conn\n table = dynamodb.Table(table_name)\n with table.batch_writer() as batch:\n for item in items:\n batch.put_item(Item=item)\n return True", "async def mass_send(self, messages: List[Sms]) -> List[int]:\n raise NotImplementedError", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to add a user as friends, that is, to create a bidirectional link that connects the two users.
def add_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 0: self.users_hat[user1_index, user2_index] = 1 elif self.is_verbose(): self.log(f"User {user2_index} was already following user {user1_index}") if self.users_hat[user2_index, user1_index] == 0: self.users_hat[user2_index, user1_index] = 1 elif self.is_verbose(): self.log(f"User {user1_index} was already following user {user2_index}")
[ "def add_to_friends(self):\n self._iface.activate_overlay('friendadd', self.user_id)", "def friending(user, friend):\n user.update(add_to_set__friends=friend)\n friend.update(add_to_set__friends=user)", "def add_friend(self, friend_id):\n if Relationship.objects.filter(from_user_id=self.pk, to_user_id=friend_id).exists():\n return False\n else:\n friend = RegisteredUser.objects.filter(id=friend_id)\n if friend.count() == 1:\n friend = friend[0]\n Relationship.objects.create(from_user=self, to_user=friend, balance=0.0)\n return True\n else:\n return False", "def add_friend(user_id, friend_id):\n\n query = (db.friends.user_id == user_id)\n user_friends = db(query).select(db.friends.friends_list).first()\n\n user_friends = eval(user_friends[\"friends_list\"])\n user_friends.add(friend_id)\n\n db(query).update(friends_list=str(user_friends))", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def add_friend(self,user):\n api.add_privatedata(self,cherrypy.session['username'],cherrypy.session['api_key'],cherrypy.session['U_pass'],cherrypy.session['login_record'],cherrypy.session['signing_key'],cherrypy.session['priv_data'],cherrypy.session['privateKey'].decode(\"utf-8\"),[],[],[],[],[],user)\n raise cherrypy.HTTPRedirect('/')", "def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))", "def get_user_friends_graph(self, user: str, user_friends_getter) -> nx.Graph:\n graph = nx.Graph()\n user_friends_list = user_friends_getter.get_friends_by_name(user)\n local = [user] + user_friends_list\n\n # Nodes are friends of user\n for agent in local:\n graph.add_node(agent)\n\n # Edges between user1 and user2 indicate that both users follow each other\n li = list(graph.nodes)\n for i in range(len(li)):\n for j in range(i, len(li)):\n user1 = li[i]\n user2 = li[j]\n\n user1_friends_list = user_friends_getter.get_friends_by_name(user1)\n user2_friends_list = user_friends_getter.get_friends_by_name(user2)\n\n if user1 in user2_friends_list and user2 in user1_friends_list:\n graph.add_edge(li[j], li[i])\n\n return graph", "def create_friends(user, existing_friends):\n #ToDo Add error handling\n bulk_insert = []\n existing_friend_ids = []\n for friend in existing_friends:\n bulk_insert.append(pm.FriendData(uid=user, friend_id=friend.user))\n bulk_insert.append(pm.FriendData(uid=friend.user, friend_id=user))\n existing_friend_ids.append(friend.uid)\n\n pm.FriendData.objects.bulk_create(\n bulk_insert\n )\n\n #Create Graph Node object\n graph_db = neo4j.GraphDatabaseService()\n\n user_index = graph_db.get_or_create_index(neo4j.Node, \"User\")\n user_node = user_index.get_or_create(\"uid\", UserSocialAuth.objects.get(user=user).uid, {\"uid\":UserSocialAuth.objects.get(user=user).uid})\n user_node.add_labels(\"User\")\n\n #Create friend relationships to existing friends\n for friend_uid in 
existing_friend_ids:\n\n friend_created = user_index.create_if_none(\"uid\", friend_uid, {\"uid\":friend_uid})\n if friend_created:\n friend_created.add_labels(\"User\")\n else:\n friend_created = user_index.get(\"uid\", friend_uid)[0]\n\n rel = graph_db.create((user_node, \"FRIENDS\", friend_created))", "def add_friend(self, name):\n self.friends.append(name)", "def set_friends(self, person1, person2):\n\n person1.adjacent.add(person2)\n person2.adjacent.add(person1)", "def scrape_friends(self, user, userid):\n for friend in self._get_friends(user):\n if friend not in self.users:\n continue\n pair = tuple(sorted([userid, self.users[friend]]))\n if pair not in self.friends:\n LOGGER.debug(\"Connecting %s with %s.\", *pair)\n self.friends[pair] = Friends.create(a=pair[0], b=pair[1]).id", "def test_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n self.assertIs(u in f.friends.all(), True)\n self.assertIs(f in u.friends.all(), True)", "def add_friend(self, from_user, to_user, message=None):\n if from_user == to_user:\n raise ValidationError(\"Users cannot be friends with themselves\")\n\n if self.are_friends(from_user, to_user):\n raise IntegrityError(\"Users are already friends\")\n\n if message is None:\n message = ''\n\n request, created = FriendshipRequest.objects.get_or_create(\n from_user=from_user,\n to_user=to_user,\n )\n\n if created is False:\n raise IntegrityError(\"Friendship already requested\")\n\n if message:\n request.message = message\n request.save()\n return request", "def add_friend(request, pk):\n new_friend = User.objects.get(pk=pk)\n Friend.make_friend(request.user, new_friend)\n return redirect('posts:posts-list')", "def addFriend(self, f):\n\t\tself.friends.append(f)", "def are_friends(self, user1, user2):\n try:\n Friend.objects.get(to_user=user1, from_user=user2)\n return True\n except Friend.DoesNotExist:\n return False", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render the Lilypond music expression lily using lilypond.
def render_lily(self, lily): shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest() relfn = posixpath.join(self.builder.imgpath, 'lily', shasum) outfn = path.join(self.builder.outdir, '_images', 'lily', shasum) if path.isfile(outfn): return relfn if hasattr(self.builder, '_lilypng_warned'): return None, None music = DOC_HEAD + self.builder.config.pnglily_preamble + lily if isinstance(music, unicode): music = music.encode('utf-8') # use only one tempdir per build -- the use of a directory is cleaner # than using temporary files, since we can clean up everything at once # just removing the whole directory (see cleanup_tempdir_lily) if not hasattr(self.builder, '_lilypng_tempdir'): tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp() else: tempdir = self.builder._lilypng_tempdir tf = open(path.join(tempdir, 'music.ly'), 'w') tf.write(music) tf.close() ensuredir(path.dirname(outfn)) # use some standard lilypond arguments lilypond_args = [self.builder.config.pnglily_lilypond] #lilypond_args += ['-o', tempdir, '--png'] lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts', '-dinclude-eps-fonts', '-o', tempdir, '--png'] # add custom ones from config value lilypond_args.extend(self.builder.config.pnglily_lilypond_args) # last, the input file name lilypond_args.append(path.join(tempdir, 'music.ly')) try: p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE) except OSError, err: if err.errno != 2: # No such file or directory raise self.builder.warn('lilypond command %r cannot be run (needed for music ' 'display), check the pnglily_lilypond setting' % self.builder.config.pnglily_lilypond) self.builder._lilypng_warned = True return None, None stdout, stderr = p.communicate() if p.returncode != 0: raise LilyExtError(u'lilypond exited with error:\n[stderr]\n%s\n' '[stdout]\n%s' % (stderr.decode('utf-8'), stdout.decode('utf-8'))) shutil.copyfile(path.join(tempdir, 'music.png'), outfn) #Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE) return relfn
[ "def render_voice(self, instrument):\n voice_render = f'<div class=\"lyrics\">{instrument.get_lyric()}</div>'\n return voice_render", "def render(self, ontol, **args):\n pass", "def hxlexpand():\n run_script(hxlexpand_main)", "def playOutput():\n global coordinates, lastPlayedCoordinates\n\n tempDir = \".bt_temp\"\n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n\n if (coordinates == []):\n return\n\n # If there have been no changes to the canvas, don't recreate the .wav files\n if (coordinates == lastPlayedCoordinates):\n if os.path.isfile(tempSongPath):\n call(['python','PlayMelody.py',tempSongPath])\n return\n\n lex = Lexer(coordinates)\n song = lex.compose_song()\n \n # Don't create a sub directory and just make them hidden files, this way no permission error\n\n # Delete the old one if it exists\n if os.path.exists(tempDir):\n shutil.rmtree(tempDir)\n # Create temporary directory to store intermediate files\n os.makedirs(tempDir)\n \n \n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n if os.path.exists(tempSongPath):\n shutil.rmtree(tempSongPath)\n\n createMelody(song, tempSongPath)\n\n call(['python','PlayMelody.py',tempSongPath])\n\n lastPlayedCoordinates = coordinates", "def render_ldl(variables, output):\n\n f = open(output, 'w')\n\n # Include header\n f.write(\"#include \\\"ldl.h\\\"\\n\\n\")\n\n # Write ldl_lsolve\n write_ldl_lsolve(f, variables)\n\n # Write ldl_ltsolve\n write_ldl_ltsolve(f, variables)\n\n # Write ldl_dinvsolve\n write_ldl_dinvsolve(f, variables)\n\n # Write ldl_perm\n write_ldl_perm(f, variables)\n\n # Write ldl_permt\n write_ldl_permt(f, variables)\n\n f.close()", "def render_harp(self, instrument):\n instr_silent = instrument.get_is_silent()\n instr_broken = instrument.get_is_broken()\n instr_type = instrument.get_type()\n\n note_renderer = HtmlNoteRenderer()\n\n if instr_broken:\n instr_state = \"broken\"\n elif instr_silent:\n instr_state = \"silent\"\n else:\n instr_state = \"\"\n \n css_class = \" \".join(filter(None,[\"instr\", instr_type, instr_state]))\n \n harp_render = f'<div class=\"{css_class}\" id=\"instr-{instrument.get_index()}\">'\n\n (rows, cols) = instrument.get_shape()\n\n for row in range(rows):\n #harp_render += '\\n'\n for col in range(cols):\n note = instrument.get_note_from_position((row, col))\n note_render = note_renderer.render(note) \n harp_render += note_render\n harp_render += '</div>' \n\n if instrument.get_repeat() > 1:\n harp_render += (f'<div class=\"repeat\">x{instrument.get_repeat()}</div>')\n\n return harp_render", "def ly(self, l: int, lfrac: float) -> float:\n self._check_lfrac(lfrac)\n self._raise_if_not_line(l)\n result = self._read_inline(f\"ly({l},{lfrac})\")\n return result", "async def render_all():\n\tglobal rendered_until; rendered_until = 0 # Disable the delay\n\tlogging.debug(\"enqueueing all tracks\")\n\tdatabase.enqueue_all_tracks()\n\tlogging.debug(\"renderer started\")\n\tglobal ffmpeg\n\tffmpeg = await asyncio.create_subprocess_exec(\"ffmpeg\", \"-y\", \"-ac\", \"2\", \"-f\", \"s16le\", \"-i\", \"-\", \"next_glitch.mp3\",\n\t\tstdin=subprocess.PIPE, stdout=subprocess.DEVNULL)\n\tasyncio.ensure_future(infinitely_glitch())\n\tawait ffmpeg.wait()\n\tos.replace(\"next_glitch.mp3\", \"major_glitch.mp3\")", "def render(sim_file: str, only_stuck: bool) -> None:\n import DLA\n DLA.GREEN = (0, 0, 0) # type: ignore\n DLA.WHITE = (151, 151, 151, 150) # type: ignore\n from DLA import config\n config.USE_PYGAME = True # type: ignore\n from DLA import renderer\n renderer.render(Path(sim_file), only_stuck)", "def 
chopper_lyrics(request, chopper_id):\n chopper = Chopper.objects.get(pk=chopper_id)\n return render(request, \"lyrics.html\", context={\"song\":chopper})", "def write_lily_src_files():\n skip_cmds = []\n print('Write .ly files for each entry:')\n for cmd_name in lg.in_cmds:\n print('- ' + cmd_name)\n gen_src_name = os.path.join(lg.dir_lysrc, cmd_filename(cmd_name) + '.ly')\n # handle existing commands\n if os.path.exists(gen_src_name):\n action = ''\n while not (action == 'Y' or action == 'N'):\n action = input('already present. Overwrite (y/n)? ')\n action = action.upper()\n if action == 'N':\n skip_cmds.append(cmd_name)\n continue\n \n # open a single lily src file for write access\n fout = open(gen_src_name, 'w')\n\n #output the license information\n fout.write(lg.lilyglyphs_copyright_string)\n fout.write('\\n')\n\n #output information on the actual file\n write_file_info(cmd_name, fout)\n\n #write the default LilyPond stuff\n fout.write(lily_src_prefix)\n\n # write the comment for the command\n fout.write('%{\\n')\n for line in lg.in_cmds[cmd_name]['comment']:\n fout.write(line + '\\n')\n fout.write('%}\\n\\n')\n\n # write the actual command\n fout.write(cmd_name + ' = {\\n')\n for line in lg.in_cmds[cmd_name]['lilySrc']:\n fout.write(line + '\\n')\n fout.write('}\\n')\n\n # write the score definition\n fout.write(lily_src_score)\n\n # finish the LilyPond file\n fout.write(' \\\\' + cmd_name + '\\n')\n fout.write('}\\n\\n')\n\n fout.close()\n \n # remove skipped commands from in_cmds\n print(skip_cmds)\n for cmd_name in skip_cmds:\n del lg.in_cmds[cmd_name]\n lg.lily_files.remove(cmd_filename(cmd_name))", "def midi_to_lilypond_note(note):\n return all_notes[note+4]", "def render3D(self):\n self.threedFrame.threedRWI.Render()", "def build(self, **kwargs):\n self.lexer = ply.lex.lex(object=self, **kwargs)", "def _compile(self):\n self.display_list.begin()\n\n ox = .25\n oy = .33\n last_tex = None\n for i in self.sides:\n ix = 0\n x, y = self.split_coords[i[5]]\n x *= ox\n y *= oy\n coords = ((x+ox, y+oy), (x+ox, y), (x, y), (x, y+oy))\n\n glBegin(GL_QUADS)\n\n glNormal3f(*self.normals[i[6]])\n\n for x in i[:4]:\n glTexCoord2fv(coords[ix])\n a, b, c = self.corners[x]\n glVertex3f(a,b,c)\n ix += 1\n glEnd()\n self.display_list.end()", "def dspyRender(self):\n pass", "def bar_to_lilypond_notes(notes):\n lp_notes = []\n if notes[0] is None:\n notes = notes[1:]\n if notes[0] is None:\n lp_notes.append(\"r\")\n for n in notes:\n if n is None:\n continue\n if type(n) is list:\n lp_notes.append([midi_to_lilypond_note(x) for x in n])\n else:\n lp_notes.append(midi_to_lilypond_note(n))\n return lp_notes", "def prepare_svg(song):\n\n target_dir = 'templates/heads'\n # todo: ensure dir exists\n work_dir = 'work/svg'\n if should_skip(f'{target_dir}/{song[\"filename\"]}.svg'):\n # todo: log we decided to skip it\n print(f'skipping svg for {song[\"filename\"]}')\n return\n print('starting svg')\n first_line = '#(set-global-staff-size 34)'\n score = first_line + '\\n' + song['score']\n # score = score.replace('\\easyHeadsOn', '\\easyHeadsOff')\n with open(f'{SCORE_ONLY_PATH}/{song[\"filename\"]}.ly', 'w') as so:\n so.write(score)\n\n # would it be nicer to have this in some config aside ?\n options = [\n '-fsvg',\n '-dbackend=svg',\n f'--output={work_dir}',\n '-dno-point-and-click',\n '-s',\n '-dcrop',\n f'{SCORE_ONLY_PATH}/{song[\"filename\"]}.ly'\n ]\n # lily is decided to create 2 files, delete the other one\n run_lily(options)\n os.remove(f'{work_dir}/{song[\"filename\"]}.svg')\n with 
open(f'{work_dir}/{song[\"filename\"]}.cropped.svg') as cropped:\n svg_text = cropped.read()\n svg_text = svg_text.replace('<svg ', '<svg class=\"score\" ')\n # todo: include in jinja also from other dirs ?\n with open(f'{target_dir}/{song[\"filename\"]}.svg', 'w') as svg_res:\n svg_res.write(svg_text)\n os.remove(f'{work_dir}/{song[\"filename\"]}.cropped.svg')", "def render(tiddler, environ):\n config = environ.get('tiddlyweb.config', {})\n default_renderer = config.get('multirender.default', DEFAULT_RENDERER)\n renderer_name = environ.get('tiddlyweb.render', default_renderer)\n try:\n imported_module = __import__('tiddlyweb.wikitext.%s' % renderer_name,\n {}, {}, ['render'])\n except ImportError, err:\n err1 = err\n try:\n imported_module = __import__(renderer_name, {}, {}, ['render'])\n except ImportError, err:\n raise ImportError(\"multirender couldn't load module for %s: %s, %s\" %\n (renderer_name, err, err1))\n return imported_module.render(tiddler, environ)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function places an order for "context.index" in the amount required to neutralize the beta exposure of the portfolio. Note that additional leverage in the account is taken on, however, net market exposure is reduced.
def hedge_portfolio(context, data): factors = get_alphas_and_betas(context, data) beta_exposure = 0.0 count = 0 for asset in context.portfolio.positions: if asset in factors and asset != context.index: if not np.isnan(factors[asset].beta): beta_exposure += factors[asset].beta count += 1 beta_hedge = -1.0 * beta_exposure / count dollar_amount = context.portfolio.portfolio_value * beta_hedge record(beta_hedge=beta_hedge) if not np.isnan(dollar_amount): order_target_value(context.index, dollar_amount)
[ "def trade(context, data):\n # Create a single series from our stock and bond weights\n total_weights = pd.concat([context.stock_weights, context.bond_weights])\n \n # Create a TargetWeights objective\n target_weights = opt.TargetWeights(total_weights)\n \n # Execute the order_optimal_portfolio method with above objective and any constraint\n order_optimal_portfolio(\n objective = target_weights,\n constraints = []\n )\n #Log our holdings\n log.info( [ str(s.symbol) for s in sorted(context.portfolio.positions) ] )\n #print(\"Cash: \" + str(context.portfolio.cash))\n # Record our weights for insight into stock/bond mix and impact of trend following\n record(stocks=context.stock_weights.sum(), bonds=context.bond_weights.sum())", "def get_alphas_and_betas(context, data):\r\n all_assets = context.portfolio.positions.keys()\r\n if context.index not in all_assets:\r\n all_assets.append(context.index)\r\n prices = data.history(all_assets, 'price', context.lookback, '1d')\r\n returns = prices.pct_change()[1:]\r\n # index_returns = returns[context.index]\r\n factors = {}\r\n for asset in context.portfolio.positions:\r\n try:\r\n y = returns[asset]\r\n factors[asset] = linreg(returns[context.index], y)\r\n except:\r\n log.warn(\"[Failed Beta Calculation] asset = %s\" % asset.symbol)\r\n return pd.DataFrame(factors, index=['alpha', 'beta'])", "def place_stop_orders(context):\n for position in context.portfolio.positions:\n market = continuous_future(position.root_symbol)\n amount = context.portfolio.positions[position].amount\n cost_basis = context.portfolio.positions[position].cost_basis\n \n try:\n context.price = context.prices[market].close[-1]\n except KeyError:\n context.price = 0\n\n if not context.has_stop[market]:\n if amount > 0 and context.price >= cost_basis:\n context.stop[market] = context.price\\\n - context.average_true_range[market]\\\n * context.stop_multiplier\n elif amount > 0 and context.price < cost_basis:\n context.stop[market] = cost_basis\\\n - context.average_true_range[market]\\\n * context.stop_multiplier\n elif amount > 0 and context.price == 0:\n context.stop[market] = cost_basis\\\n - context.average_true_range[market]\\\n * context.stop_multiplier\n elif amount < 0 and context.price >= cost_basis:\n context.stop[market] = cost_basis\\\n + context.average_true_range[market]\\\n * context.stop_multiplier\n elif amount < 0 and context.price < cost_basis:\n context.stop[market] = context.price\\\n + context.average_true_range[market]\\\n * context.stop_multiplier\n elif amount < 0 and context.price == 0:\n context.stop[market] = cost_basis\\\n + context.average_true_range[market]\\\n * context.stop_multiplier\n\n order_identifier = order_target(\n position,\n 0,\n style=StopOrder(context.stop[market])\n )\n if order_identifier is not None:\n context.orders[market].append(order_identifier)\n context.has_stop[market] = True\n\n if context.is_info:\n log.info(\n 'Stop %s %.2f'\n % (\n market.root_symbol,\n context.stop[market]\n )\n )", "def limit_order(self):\n \n portfolio_USD_current = 2000.00 #replace with model\n portfolio_USD_start = 2000.00\n portfolio_EUR_current = 1600.00\n portfolio_EUR_start = 1600.00\n \n \n currency_options = dict( #replace with model\n currency_pair='USD',\n bid={5.26 : {'guy_1' : 100.00}},\n ask={5.27 : {'guy_2' : 200.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state_USD = CurrencyPairState(**currency_options)\n currency_options = dict(\n currency_pair='EUR',\n bid={4.24 : {'guy_3' : 100.00}},\n ask={4.25 : {'guy_4' : 100.00, 
'guy_5' : 100.00}, 4.26 : {'guy_6' : 50.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state_EUR = CurrencyPairState(**currency_options)\n \n (buy_trade,sell_trade) = self.produce_trade_pair_forward(currency_pair_state_USD, currency_pair_state_EUR) #produce naive pair\n \n gain_total = self.gain(buy_trade,sell_trade,currency_pair_state_USD,currency_pair_state_EUR) #solve for gain\n \n if(gain_total <= 0): #attempt in other direction if the trade doesnt make money\n (buy_trade,sell_trade) = self.produce_trade_pair_forward(currency_pair_state_EUR, currency_pair_state_USD)\n \n gain_total = self.gain(buy_trade,sell_trade,currency_pair_state_EUR,currency_pair_state_USD)\n if(gain_total <= 0): #trade doesnt make money, don't return a trade pair at this time.\n return None\n \n new_trade_volume = portfolio_manager(portfolio_EUR_start,portfolio_EUR_current,sell_trade.get_volume(),sell_trade.get_price(),gain_total)\n #check portfolio manager for the amount we're willing to trade\n buy_trade.set_volume(new_trade_volume)\n sell_trade.set_volume(new_trade_volume)\n return [buy_trade,sell_trade]\n \n new_trade_volume = portfolio_manager(portfolio_USD_start,portfolio_USD_current,sell_trade.get_volume(),sell_trade.get_price(),gain_total)\n \n buy_trade.set_volume(new_trade_volume)\n sell_trade.set_volume(new_trade_volume)\n return [buy_trade,sell_trade]\n \n \n \n #trade_options = dict(\n # currency_pair = 'EUR',\n # buy_BTC = True,\n # price = 13.00,\n # volume = 300.00,\n # time_expiry = datetime.datetime.now()\n #)\n #return Trade(**trade_options)", "def stop_loss_equities(context, data, log):\n positions = context.portfolio.positions\n if len(positions) == 0:\n return\n\n for stock, position in list(positions.items()):\n cost_basis = position.cost_basis\n if cost_basis == 0:\n cost_basis = position.last_sale_price\n pl_pct = (position.last_sale_price - cost_basis) / cost_basis\n if pl_pct <= context.PARAM['loss_limit']:\n log.warn(\"%s positions closed with a loss of %.2f.\" % (stock, 100.0*pl_pct))\n if data.can_trade(stock):\n order_target(stock, 0)", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def CalcEffectiveInventory(self):\r\n return (self.currentStock - self.currentOrders)", "def gbce_index(self):\n stocks_vwsp = [Stock.get_instance().get_stock_by_symbol(tr.symbol).vwsp for tr in Trade.get_instance()]\n try:\n return (reduce(operator.mul, stocks_vwsp, 1)) ** (1.0/len(stocks_vwsp))\n except ZeroDivisionError:\n return 0.0", "def choixAlphaBeta(position,prof):\n if(positionTerminale(position)):\n return 0\n else:\n coup=evalueAlphaBeta(position,prof,1,-np.inf,np.inf)\n return coup['coup']", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n 
quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)", "def wealth_index(self) -> pd.DataFrame:\n df = self._add_inflation()\n df = Frame.get_wealth_indexes(df)\n if isinstance(df, pd.Series): # should always return a DataFrame\n df = df.to_frame()\n df.rename({1: \"portfolio\"}, axis=\"columns\", inplace=True)\n return df", "def stop_loss_portfolio(context, data, log):\n\n positions = context.portfolio.positions\n if len(positions) == 0:\n return\n\n initial_value = 0\n last_value = 0\n\n for stock, position in list(positions.items()):\n cost_basis = position.cost_basis\n if cost_basis == 0:\n cost_basis = position.last_sale_price\n\n initial_value += cost_basis * position.amount\n last_value += position.last_sale_price * position.amount\n\n montly_return = last_value / initial_value - 1.0\n if montly_return <= context.PARAM['loss_limit']:\n log.warn(\"Montly loss (%.2f) exceeded the loss limit: close all positions.\" % (100.0*montly_return))\n close_all(context, data)", "def blank_future_eta(request):\n today = datetime.datetime.today()\n today = today.date()\n\n orders = OrderDetail.objects.filter(eta__gt=today)\n for order in orders:\n order.eta = None\n order.save()\n\n return HttpResponse('ok', mimetype='text/plain')", "def rebalance(context, data):\n\n cancel_all_orders(context, data)\n sell_stocks_not_in_portfolio(context, data)\n\n LOG.info(\"rebalancing\")\n LOG.info(context.stocks)\n totals = calculate_totals(context, data)\n LOG.info(\"totals calculated: %s\" % totals)\n for stock, info in totals.items():\n order(stock, info[\"total\"])", "def alpha13(df):\n return (-1 * u.rank(u.cov(u.rank(df.close), u.rank(df.volume), 5)))", "def alpha25(df):\n return u.rank(((((-1 * df.returns) * u.adv(df, 20)) * df.vwap) * (df.high - df.close)))", "def size_order(self, portfolio, initial_order):\n ticker = initial_order.ticker\n if initial_order.action == \"EXIT\":\n # Obtain current quantity and liquidate\n cur_quantity = portfolio.positions[ticker].quantity\n if cur_quantity > 0:\n initial_order.action = \"SLD\"\n 
initial_order.quantity = cur_quantity\n else:\n pass\n #initial_order.action = \"BOT\"\n #initial_order.quantity = cur_quantity\n else:\n if initial_order.action == 'BOT':\n weight = self.ticker_weight\n # Determine total portfolio value, work out dollar weight\n # and finally determine integer quantity of shares to purchase\n price = portfolio.price_handler.tickers[ticker][\"close\"]\n price = PriceParser.display(price)\n equity = PriceParser.display(portfolio.equity)\n dollar_weight = weight * equity\n weighted_quantity = int(floor(dollar_weight / price))\n weighted_quantity = (int(weighted_quantity/100)) * 100\n initial_order.quantity = weighted_quantity\n else:\n cur_quantity = portfolio.positions[ticker].quantity\n initial_order.action = \"SLD\"\n initial_order.quantity = cur_quantity\n\n return initial_order", "def book_returnofcapital(\n transaction: Union[ReturnOfCapital, models.Transaction],\n portfolio: PortfolioType,\n **_,\n) -> List[Gain]:\n pocket = (transaction.fiaccount, transaction.security)\n position = portfolio.get(pocket, [])\n\n unaffected, affected = utils.partition(longAsOf(transaction.datetime), position)\n affected = list(affected)\n if not affected:\n msg = (\n f\"Return of capital {transaction}:\\n\"\n f\"FI account {transaction.fiaccount} has no long position in \"\n f\"{transaction.security} as of {transaction.datetime}\"\n )\n raise Inconsistent(transaction, msg)\n\n adjustedLots, gains = functions.adjust_price(affected, transaction)\n portfolio[pocket] = adjustedLots + list(unaffected)\n return gains", "def __sell(self, order, portfolio):\n amount = order.price * order.volume\n portfolio.remove_stock(order.symbol, order.volume)\n portfolio.add_cash(amount)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a dataframe of 'alpha' and 'beta' exposures for each asset in the current universe.
def get_alphas_and_betas(context, data): all_assets = context.portfolio.positions.keys() if context.index not in all_assets: all_assets.append(context.index) prices = data.history(all_assets, 'price', context.lookback, '1d') returns = prices.pct_change()[1:] # index_returns = returns[context.index] factors = {} for asset in context.portfolio.positions: try: y = returns[asset] factors[asset] = linreg(returns[context.index], y) except: log.warn("[Failed Beta Calculation] asset = %s" % asset.symbol) return pd.DataFrame(factors, index=['alpha', 'beta'])
[ "def exposure(self, universe: pd.DataFrame) -> pd.DataFrame:\n exposure = pd.DataFrame(0.0, index=universe.index, columns=universe.columns)\n exposure.loc[:, self.asset] = universe.loc[:, self.asset] * self.lot\n i_entry, i_close = universe.index.get_indexer([self.entry, self.close])\n exposure[: min(i_entry + 1, exposure.shape[0])] = 0\n exposure[min(i_close + 1, exposure.shape[0]) :] = 0\n return exposure", "def get_alphas(portfolio_returns,risk_free,market_returns,betas):\r\n \r\n R = portfolio_returns\r\n Rf = risk_free\r\n Beta = betas\r\n Rm = market_returns\r\n alpha = R - Rf - (Beta*(Rm-Rf))\r\n \r\n return alpha", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def get_assets(self):\r\n logging.info('Getting Assets and Storage assets')\r\n\r\n # EXTRACTING OPTIMISATION MODEL RESULTS\r\n # installed capacity\r\n f = self.esom.get_var('F')\r\n # energy produced by the technology\r\n f_year = self.ta.from_td_to_year(ts_td=self.esom.get_var('F_t')\r\n .reset_index().set_index(['Typical_days', 'Hours'])) \\\r\n .groupby(['Regions', 'Technologies']).sum() \\\r\n .rename(columns={'F_t': 'F_year'})\r\n # Get Storage_power (power balance at each hour)\r\n storage_in = self.esom.get_var('Storage_in') \\\r\n .groupby(['Regions', 'I in storage_tech', 'Hours', 'Typical_days']).sum()\r\n storage_out = self.esom.get_var('Storage_out') \\\r\n .groupby(['Regions', 'I in storage_tech', 'Hours', 'Typical_days']).sum()\r\n\r\n # ASSETS COMPUTATIONS\r\n # Get the bounds on F (f_min,f_max)\r\n # create frames for concatenation (list of df to concat)\r\n frames = list()\r\n for n, r in self.regions.items():\r\n frames.append(r.data['Technologies'].loc[:, ['f_min', 'f_max']].copy())\r\n assets = f.merge(pd.concat(frames, axis=0, keys=self.regions_names)\r\n , left_on=['Regions', 'Technologies'], right_index=True) \\\r\n .merge(f_year, left_on=['Regions', 'Technologies'], right_on=['Regions', 'Technologies']).reset_index()\r\n # set Regions and Technologies as categorical data and sort it\r\n assets['Regions'] = pd.Categorical(assets['Regions'], self.regions_names)\r\n self.categorical_esmc(df=assets, col_name='Technologies', el_name='Technologies')\r\n assets.sort_values(by=['Regions', 'Technologies'], axis=0, ignore_index=True, inplace=True)\r\n assets.set_index(['Regions', 'Technologies'], inplace=True)\r\n # put very small values as nan\r\n treshold = 1e-2\r\n assets = assets.mask((assets > -treshold) & (assets < treshold), np.nan)\r\n treshold = 1e-1\r\n assets['F_year'] = assets['F_year'].mask((assets['F_year'] > -treshold) & (assets['F_year'] < treshold), np.nan)\r\n\r\n # STORAGE ASSETS COMPUTATIONS\r\n # compute the balance\r\n storage_power = storage_out.merge(-storage_in, left_index=True, right_index=True)\r\n storage_power['Storage_power'] = storage_power['Storage_out'] + storage_power['Storage_in']\r\n # losses are the sum of the balance over the year\r\n sto_losses = self.ta.from_td_to_year(ts_td=storage_power['Storage_power']\r\n .reset_index().set_index(['Typical_days', 'Hours'])) \\\r\n .groupby(['Regions', 'I in storage_tech']).sum()\r\n # Update F_year in assets df for STORAGE_TECH\r\n assets.loc[sto_losses.index, 'F_year'] = sto_losses['Storage_power']\r\n # replace Storage_in and Storage_out by values deduced from Storage_power\r\n # such that at each hour the flow goes only in 1 direction\r\n threshold = 1e-2\r\n storage_power['Storage_in'] = 
storage_power['Storage_power'].mask((storage_power['Storage_power'] > -threshold),\r\n np.nan)\r\n storage_power['Storage_out'] = storage_power['Storage_power'].mask((storage_power['Storage_power'] < threshold),\r\n np.nan)\r\n # Compute total over the year by mapping TD\r\n sto_flux_year = self.ta.from_td_to_year(ts_td=storage_power.reset_index().set_index(['Typical_days', 'Hours'])) \\\r\n .groupby(['Regions', 'I in storage_tech']).sum() \\\r\n .rename(columns={'Storage_out': 'Year_energy_flux'}).drop(columns=['Storage_in', 'Storage_power'])\r\n # create sto_assets from copy() of assets\r\n sto_assets = assets.copy()\r\n sto_assets.rename(columns={'F_year': 'Losses'}, inplace=True)\r\n # merge it with sto_flux_year\r\n sto_flux_year.index.set_names(sto_assets.index.names, inplace=True) # set proper name to index\r\n sto_assets = sto_assets.merge(sto_flux_year, left_index=True, right_on=['Regions', 'Technologies'],\r\n how='right')\r\n # Get storage_charge_time and storage_discharge_time from input data\r\n # and compute maximum input and output power of the storage technology\r\n frames = list()\r\n for n, r in self.regions.items():\r\n frames.append(r.data['Storage_power_to_energy'].copy())\r\n sto_assets = sto_assets.merge(pd.concat(frames, axis=0, keys=self.regions_names)\r\n , left_on=['Regions', 'Technologies'], right_index=True)\r\n sto_assets['Storage_in_max'] = sto_assets['F'] / sto_assets['storage_charge_time']\r\n sto_assets['Storage_out_max'] = sto_assets['F'] / sto_assets['storage_discharge_time']\r\n sto_assets.drop(columns=['storage_charge_time', 'storage_discharge_time'], inplace=True)\r\n # set Region and Technology as categorical data and sort it\r\n sto_assets.reset_index(inplace=True)\r\n sto_assets['Regions'] = pd.Categorical(sto_assets['Regions'], self.regions_names)\r\n self.categorical_esmc(df=sto_assets, col_name='Technologies', el_name='Technologies')\r\n sto_assets.sort_values(by=['Regions', 'Technologies'], axis=0, ignore_index=True, inplace=True)\r\n sto_assets.set_index(['Regions', 'Technologies'], inplace=True)\r\n # put very small values as nan\r\n treshold = 1\r\n sto_assets = sto_assets.mask((sto_assets > -treshold) & (sto_assets < treshold), np.nan)\r\n\r\n # Store into results\r\n self.results['Assets'] = assets\r\n self.results['Sto_assets'] = sto_assets\r\n return", "def test_get_all_assets_df_true_asset_field_and_asset_metric():\n asset_fields = ['metrics']\n asset_metric = 'marketcap'\n assert (get_all_assets(asset_fields=asset_fields, asset_metric=asset_metric, to_dataframe=True), DataFrame)", "def get_portfolio_df(self):\n df = pd.DataFrame(data=self.asset_manager.asset_history)\n return df.set_index('time')", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = 
extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def test_get_all_assets_df_true_with_asset_fields():\n asset_fields = ['metrics']\n assert isinstance(get_all_assets(asset_fields=asset_fields, to_dataframe=True), DataFrame)", "def table(self) -> pd.DataFrame:\n x = pd.DataFrame(\n data={\n \"asset name\": list(self.names.values()),\n \"ticker\": list(self.names.keys()),\n }\n )\n x[\"weights\"] = self.weights\n return x", "def getExpenseAnalysis(ticker):\n\tdfSGA = getFinancialData(ticker,['SellingGeneralAndAdministration']).drop(['periodType'],axis=1).rename(columns={'SellingGeneralAndAdministration' : 'SG&A'})\n\tdfRandD = getFinancialData(ticker,['ResearchAndDevelopment']).drop(['periodType'],axis=1).rename(columns={'ResearchAndDevelopment' : 'R&D'})\n\tdfCOGS = getFinancialData(ticker,['CostOfRevenue','InterestExpense']).drop(['periodType'],axis=1).rename(columns={'CostOfRevenue' : 'COGS', 'InterestExpense':'Interest'})\n\tdfOtherOperating = getFinancialData(ticker,['OperatingExpense']).drop(['periodType'],axis=1).rename(columns={'OperatingExpense' : 'Operation'})\n\tdf1 = pd.merge(dfSGA, dfRandD, on=['asOfDate'])\n\tdf2 = pd.merge(dfCOGS, dfOtherOperating, on='asOfDate')\n\texpenseData = pd.merge(df1, df2, on=['asOfDate'])\n\texpenseData['Year'] = pd.DatetimeIndex(expenseData['asOfDate']).year\n\texpenseData = expenseData.drop(['asOfDate'],axis=1)\n\texpenseData.set_index('Year', inplace=True)\n\tdfTranspose = expenseData.transpose()\n\treturn dfTranspose", "def test_get_all_assets_df_true_with_asset_metric():\n asset_metric = 'marketcap'\n assert isinstance(get_all_assets(asset_metric=asset_metric, to_dataframe=True), DataFrame)", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def beta_and_alpha(self):\n # make scatter plot\n sp_temp = self.daily_returns(self.sp.rename(columns={'Adj Close': '^GSPC'}))\n symbol_temp = self.daily_returns(self.daily.rename(columns={'Adj Close': self.symbol}))\n joined = sp_temp.merge(symbol_temp, on='Date')\n\n # beta and alpha\n beta, alpha = np.polyfit(joined[\"^GSPC\"], joined[self.symbol], 1)\n beta = round(beta, 3)\n alpha = round(alpha, 5)\n if alpha > 0:\n self.buys += 1\n self.debug += '\\nAlpha > 0: buys + {}'.format(alpha)\n else:\n self.debug += '\\nAlpha < 0: {}'.format(alpha)\n\n # assuming favorable market conditions. 
else, it would be sells + 1.\n if beta > 1:\n self.buys += 1\n self.debug += '\\nBeta > 1: buys + {}'.format(beta)\n else:\n self.debug += '\\nBeta < 1: {}'.format(beta)\n\n # finish plotting scatter\n if self.will_plot:\n ax = joined.plot(title=self.symbol + ' vs The Market', kind = 'scatter', x='^GSPC', y=self.symbol)\n ax.set_xlabel(\"S&P 500\")\n plt.plot(joined[\"^GSPC\"], beta * joined['^GSPC'] + alpha, '-', color='r', label='Correlation')\n\n # plot expected beta (slope) of 1 and alpha (y- int.) of zero\n plt.plot(joined[\"^GSPC\"], 1 * joined['^GSPC'] + 0, '-', color='gray', label='Beta of 1')\n plt.plot(joined[\"^GSPC\"], 0 * joined['^GSPC'] + 0, '-', color='gray', label='Alpha of 0')\n plt.legend(loc='best')", "def _fetch_dataframe(self):\n df = pd.DataFrame([self._reshape(artifact) for artifact in self._get_list_artifacts()])\n return df", "def beta_energies(self):\n return self._beta_energies", "def compute_cap_exposures(positions, caps):\n\n long_exposures = []\n short_exposures = []\n gross_exposures = []\n net_exposures = []\n\n positions_wo_cash = positions.drop('cash', axis='columns')\n tot_gross_exposure = positions_wo_cash.abs().sum(axis='columns')\n tot_long_exposure = positions_wo_cash[positions_wo_cash > 0] \\\n .sum(axis='columns')\n tot_short_exposure = positions_wo_cash[positions_wo_cash < 0] \\\n .abs().sum(axis='columns')\n\n for bucket_name, boundaries in CAP_BUCKETS.items():\n in_bucket = positions_wo_cash[(caps >= boundaries[0]) &\n (caps <= boundaries[1])]\n\n gross_bucket = in_bucket.abs().sum(axis='columns') \\\n .divide(tot_gross_exposure)\n long_bucket = in_bucket[in_bucket > 0] \\\n .sum(axis='columns').divide(tot_long_exposure)\n short_bucket = in_bucket[in_bucket < 0] \\\n .sum(axis='columns').divide(tot_short_exposure)\n net_bucket = long_bucket.subtract(short_bucket)\n\n gross_exposures.append(gross_bucket)\n long_exposures.append(long_bucket)\n short_exposures.append(short_bucket)\n net_exposures.append(net_bucket)\n\n return long_exposures, short_exposures, gross_exposures, net_exposures", "def _compute_alpha(self):\n t = self._compute_step_length()\n fs = self._get_info(\"f\")\n dfs = self._get_info(\"df\")\n var_fs = self._get_info(\"var_f\")\n var_dfs = self._get_info(\"var_df\")\n\n # Compute alpha\n mu = _fit_quadratic(t, fs, dfs, var_fs, var_dfs)\n alpha = _get_alpha(mu, t)\n\n return alpha", "def alpha_energies(self):\n return self._alpha_energies", "def test_get_asset_dataframe_output(asset_data):\n asset_fields = ['id', 'symbol']\n asset_keys, _, _, _ = asset_data\n assert isinstance(get_asset(asset_keys, asset_fields), DataFrame)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes characters listed in self.custom_chars
def _remove_custom_chars(self, text: str) -> str: patterns = "|".join([x for x in self.custom_chars]) return re.sub(patterns, "", str(text), flags=re.IGNORECASE)
[ "def remove_special_characters(self, txt: str) -> str:", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def rmchars(value):\n value = re.sub(\"[^A-Za-z0-9.-]+\", \"\", value)\n return value", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def other_chars(self):\n return [sign for sign in re.findall(r'[^\\w\\s]', self.text)]", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for text in text_list]", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def remove_repeated_chars(self, text):\n text = re.sub(r'([!@#$%^&*,./?\\'\";:\\\\])\\1+', r'\\1', text)\n return text", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def text_without(characters: str) -> Word:\n return Word(printables, excludeChars=characters)", "def lstrip(self, chars=None): # real signature unknown; restored from __doc__\n return \"\"", "def rm_special_char(string):\n regex = re.compile(\"[^a-zA-Z ]\")\n return regex.sub(\"\", string)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def CLEAN(text):\n return _control_char_re.sub('', text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes strings starting with http
def _remove_urls(self, text: str) -> str: pattern = r"http\S+" return re.sub(pattern, " ", str(text))
[ "def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def clean_http_url(s: str) -> str:\n return (\n s.replace(\"/index\", \"\")[::-1]\n .replace(\"/\", \"\", 1)[::-1]\n .replace(\"http://\", \"\")\n .replace(\"https://\", \"\")\n .replace(\"/ga4gh/drs/v1/objects\", \"\")\n )", "def remove_url(tweet):\n\n pos = tweet.find(\"http\")\n if pos > -1:\n return tweet[0:pos]\n return tweet", "def strip_http_www(link: str) -> str:\n if link.startswith(\"https://\"):\n link = link.lstrip(\"https://\")\n elif link.startswith(\"http://\"):\n link = link.lstrip(\"http://\")\n if link.startswith(\"www.\"):\n link = link.lstrip(\"www.\")\n return link", "def remove_urls(text):\n pass", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def remocion_de_urls(self, texto):\n \n texto = re.sub(r'http\\S+', '', texto)\n return texto", "def remove_URLs(string):\n pattern = r'((http|ftp|https):\\/\\/)?[\\w\\-_]+(\\.[\\w\\-_]+)+([\\w\\-\\.,@?^=%&amp;:/~\\+#]*[\\w\\-\\@?^=%&amp;/~\\+#])?'\n return re.sub(pattern, ' ', string)", "def strip_proto(url):\n return re.sub(\"^[^:/]+://\", \"\", url)", "def handle_url(url) -> str:\n if \"https://\" not in url[:8] and \"http://\" not in url[:7]:\n url = \"http://\" + url\n return url", "def remove_urls(self, doc):\n doc = re.sub(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)'\n r'(?:[^\\s()<>]+|\\(([^\\s()<>]+|'\n r'(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|'\n r'[^\\s`!()\\[\\]{};:\\'\".,<>]))',\n '',\n doc)\n return ' '.join(doc.split())", "def remove_links(tweet):\n tweet = re.sub(r'http\\S+', '', tweet) # remove http links\n tweet = re.sub(r'bit.ly/\\S+', '', tweet) # rempve bitly links\n tweet = tweet.strip('[link]') # remove [links]\n return tweet", "def remove_urls_from_text(tweet, replacement=\"\"):\n # remove http links\n tweet = re.sub(r\"http\\S+\", replacement, tweet)\n # remove bitly links\n tweet = re.sub(r\"bit.ly/\\S+\", replacement, tweet)\n # remove t.co links\n tweet = re.sub(r\"t.co/\\S+\", replacement, tweet)\n tweet = tweet.strip(\"[link]\") # remove [links]\n return tweet", "def remove_url(x):", "def test_fix_url_no_http(self):\n test_url = \"example.com\"\n res = utils.fix_url_http(test_url)\n self.assertEqual(res, \"http://\" + test_url)", "def _strip_url(self, string):\n u = urlparse(string)\n if u.scheme != 'http' and u.scheme != 'https':\n return string\n if u.fragment:\n return u.fragment\n return u.path.split('/')[-1].strip()", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def strip_protocol(url):\n pattern = re.compile(r'^https?\\:')\n return re.sub(pattern, '', url)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes isolated block of digits
def _remove_digit_blocks(self, text: str) -> str: return re.sub(r"\b\d+\b", " ", str(text))
[ "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def removeDigits(self,txt):\n digitRemovedText = re.sub(r'[\\d]',\"\",txt)\n return digitRemovedText", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_digits(box, values, digits):\n for digit in digits:\n values[box] = values[box].replace(digit, '')", "def remove_digit(self, values, box, digit):\n values[box] = values[box].replace(digit, '')\n return values", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def remove(n, digit):\r\n removed = 0\r\n while n != 0:\r\n n, temp = n // 10, n % 10\r\n if temp != digit:\r\n removed = removed * 10 + temp\r\n return reverse(removed)", "def deleteNum(self, idx):\r\n self.replaceNum(idx, \"\")", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def compact(number):\n return clean(number, ' ').strip()", "def compact(number):\n return clean(number, ' -./,').strip()", "def compact(number):\n return clean(number, ' -.').strip()", "def compact(number):\n return clean(number, ' -').strip()", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def delete_first_zeros(digit_with_zeros): \n \n digit_without_zeros = \"\"\n\n snap = 1\n \n d = 0\n\n for d in digit_with_zeros:\n\n if d != \"0\":\n snap = 0\n if snap == 0:\n digit_without_zeros +=d\n \n return digit_without_zeros", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_digits(s):\n all = string.maketrans('', '')\n nodigits = all.translate(all, string.digits)\n return s.translate(all, nodigits)", "def remove_numbers(self) -> None:\n self.data = [[x for x in s if re.match(\"[A-Za-z]+\", x) is not None] for s in self.data]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes special characters as defined by the pattern in self.special_chars_pattern
def _remove_special_chars(self, text: str) -> str: pattern = re.compile(self.special_chars_pattern) text = re.sub(pattern, " ", text) return text
[ "def remove_special_characters(self, txt: str) -> str:", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def test_remove_special_chars(self):\r\n document1 = \"\"\"Tf-idf stands for term frequency-inverse document frequency, and the tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query.\"\"\"\r\n cleaned_text = \"\"\"Tf idf stands for term frequency inverse document frequency and the tf idf weight is a weight often used in information retrieval and text mining This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus Variations of the tf idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document s relevance given a user query\"\"\"\r\n clean_text = remove_special_chars(document1)\r\n self.assertTrue(len(clean_text) > 0)\r\n self.assertEquals(clean_text, cleaned_text)", "def rm_special_char(string):\n regex = re.compile(\"[^a-zA-Z ]\")\n return regex.sub(\"\", string)", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def remove_special_char(name):\n return re.sub('[^a-zA-Z0-9]', '_', name)", "def rmchars(value):\n value = re.sub(\"[^A-Za-z0-9.-]+\", \"\", value)\n return value", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def filter_out_special_tokens(text: str):\n return 
text.replace('[', '<').replace(']', '>')", "def clean_token(token):\n\n clean = re.compile(r'[§%]')\n return re.sub(clean, '', token)", "def sanitize(text):\n text = str(text).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.\\/]', '', text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return data (tuple of classes, params) for a given host.
def get_host_data(hostname, gettype='walk'): filteredNodes = Node.objects.filter(hostname=hostname) if (filteredNodes.count() == 1): node = filteredNodes[0] exclusions = get_exclusions(node) if gettype == 'work': (classes, params) = work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'optwork': (classes, params) = optimized_work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'classwork': (classes, params) = work_tree2(node, exclusions=exclusions) return (classes, params) elif gettype == 'walk': (classes, params) = walk_tree(node, exclusions=exclusions) return (classes, params) else: return ({}, {})
[ "def loadAllHostinfo():\n hidata={}\n str=\"\"\n keytypes=loadHostinfoKeys()\n keylist=sorted(hostinfo.keys())\n keylist.remove('hostname')\n for k in keylist:\n \tstr+=\" -p %s \" % k\n f=os.popen('/app/hostinfo/bin/hostinfo --noheader --csv %s' % str)\n data=f.read()\n f.close()\n strfd=cStringIO.StringIO(data)\n reader=csv.reader(strfd)\n\n for line in reader:\n \thost=line.pop(0)\n\thidata[host]={}\n\tfor key in keylist:\n\t data=line.pop(0)\n\t if not data:\n\t \tcontinue\n\t if keytypes[key]=='list':\n\t\thidata[host][key]=data.split(',')\n\t else:\n\t\thidata[host][key]=data\n\n return hidata,keytypes", "def get_host_variables(self, host):\n vars = {}\n for i in self.parsers:\n vars.update(i.get_host_variables(host))\n return vars", "def get_host_metrics(self, host, start_date, end_date):\n target_uri = '/performance/Host/metrics'\n host_perf_payload = {\n 'symmetrixId': self.array_id,\n 'endDate': end_date,\n 'hostId': host,\n 'dataFormat': 'Average',\n 'metrics': ['HostIOs', 'HostMBReads', 'HostMBWrites', 'Reads',\n 'ResponseTime', 'ReadResponseTime', 'Writes',\n 'WriteResponseTime', 'SyscallCount', 'MBs'],\n 'startDate': start_date}\n host_perf_data = self.request(\n target_uri, POST, request_object=host_perf_payload)\n host_results = dict()\n host_results['symmetrixID'] = self.array_id\n host_results['reporting_level'] = 'Host'\n host_results['HostID'] = host\n host_results['perf_data'] = host_perf_data[0]['resultList']['result']\n if 'resultList' in host_perf_data[0]:\n host_results['perf_data'] = host_perf_data[0]['resultList'][\n 'result']\n else:\n host_results['perf_data'] = []\n\n return host_results", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def host(self, host):\n if host in self.hosts_:\n vals = defaultdict(list)\n for k, value in [(x.key.lower(), x.value) for x in self.lines_\n if x.host == host and x.key.lower() != \"host\"]:\n vals[k].append(value)\n flatten = lambda x: x[0] if len(x) == 1 else x\n return {k: flatten(v) for k, v in vals.items()}\n return {}", "def get_host_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_HOST_INFO)", "def get_defaultvalues(host):\n return get_obj_defaultvalues(OBJT_HOST, host)", "def fetch_host_caps(self, host):\n e = host.executor()\n cmd_cpuinfo = (\n 'grep', 'vendor_id', '/proc/cpuinfo', '|',\n 'sort', '|',\n 'uniq', '|',\n 'cut', '-d:', '-f2',\n )\n with e.session() as ss:\n # Find vendor\n rc, out, err = ss.run_cmd(cmd_cpuinfo)\n vendor = out.strip()\n if rc or not vendor:\n raise CpuModelError(\"Can not resolve host's cpuinfo: %s\" % err)\n\n # List cpu models\n vds_caps = host.vds_client(cmd=\"Host.getCapabilities\")\n vds_caps = dict() if not vds_caps else vds_caps\n cpu_flags = vds_caps.get(\"cpuFlags\", \"\").split(\",\")\n models = [i for i in cpu_flags if \"model_\"in i]\n if not models:\n logger.warning(\"Can not resolve host's models: %s\", err)\n models = [\n MIN_MODEL.get(self._id_to_vendor(vendor))\n ]\n logger.warning(\n \"Setting minimal cpu model for %s: %s\", vendor, models[0])\n return {\n 'models': models,\n 'vendor': vendor,\n }", "def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def _get_vm_instance_data(self, services, deployment, deployed_app):\n internal_service, external_service = self._get_internal_external_services_set(\n services\n )\n\n data = [\n VmDetailsProperty(key=\"Image\", value=self._get_image(deployment)),\n VmDetailsProperty(\n key=\"Replicas\", 
value=self._get_replicas(deployment, deployed_app)\n ),\n VmDetailsProperty(\n key=\"Ready Replicas\", value=self._get_ready_replicas(deployment)\n ),\n VmDetailsProperty(\n key=\"Internal IP\", value=self.get_internal_ip(internal_service)\n ),\n VmDetailsProperty(\n key=\"Internal Ports\", value=self._get_service_ports(internal_service)\n ),\n VmDetailsProperty(\n key=\"External IP\", value=self.get_external_ip(external_service)\n ),\n VmDetailsProperty(\n key=\"External Ports\",\n value=self._get_external_service_ports(external_service),\n ),\n ]\n\n return data", "def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))", "def _vmware_host_metadata(self, inventory, host):\n host_name = host.name\n host_dc_name = inventory[host_name]['dc']\n host_cluster_name = inventory[host_name]['cluster']\n\n return host_name, host_dc_name, host_cluster_name", "def get_host_device_info(self, hostname):\n if hostname in self.devices:\n return self.devices[hostname]\n else:\n return self.devices", "def gather_metadata(params):\n params['PatchVersion'] = platform.release()\n\n try:\n #Data gathered from the HOST node\n url = 'http://169.254.169.254/metadata/instance?api-version=2017-04-02'\n headers = {'Metadata': 'true'}\n r = requests.get(url, headers=headers)\n jtext = json.loads(r.text)\n params['Region'] = jtext['compute']['location']\n params['VMSize'] = jtext['compute']['vmSize']\n params['OS_SKU'] = jtext['compute']['sku']\n params['vmId'] = jtext['compute']['vmId']\n params['OS_Distro'] = jtext['compute']['offer']\n params['vmSender'] = jtext['compute']['name']\n\t\n if jtext['compute']['osType'] == \"Linux\":\n params['IsLinux'] = \"True\"\n else:\n params['IsLinux'] = \"False\"\n except:\n ## Metadata request failed...\n if (debug): print('Metadata request failed. 
Using empty parameters.\\n')\n params['Region'] = ''\n params['VMSize'] = ''\n params['OS_SKU'] = ''\n params['vmId'] = ''\n params['OS_Distro'] = ''\n params['IsLinux'] = ''", "def __get_host_apps(host):\n apps = list()\n # from Shodan\n if 'data' in host:\n for data in host['data']:\n if 'product' in data:\n app = dict()\n app['name'] = data['product']\n app['version'] = data['version'] if 'version' in data else None\n apps.append(app)\n if 'http' in host['data'] and 'components' in host['data']['http'] \\\n and len(host['data']['http']['components']) > 0:\n for component in host['data']['http']['components']:\n apps.append({'name': component, 'version': None})\n # from ZoomEye\n app_types = ['component', 'db', 'webapp', 'server', 'framework', 'waf']\n for app_type in app_types:\n if app_type in host and len(host[app_type]) > 0:\n for app in host[app_type]:\n app_dict = dict()\n app_dict['name'] = app['name']\n if 'version' in app:\n app_dict['version'] = app['version']\n else:\n app_dict['version'] = None\n apps.append(app_dict)\n if 'system' in host and len(host['system']) > 0:\n for system in host['system']:\n app = dict()\n app['name'] = system['distrib'] if system['distrib'] is not None else system['name']\n if 'version' in system:\n app['version'] = system['version']\n else:\n app['version'] = None\n apps.append(app)\n if 'language' in host and len(host['language']) > 0:\n for language in host['language']:\n apps.append({'name': language, 'version': None})\n return apps", "def get_host_info(search_keyword, starbucks_data, city_info):\n host_data = []\n\n payload = {\n \"query_type\": \"RQBXY\",\n \"pagesize\": \"20\",\n \"pagenum\": '',\n \"qii\": \"true\",\n \"cluster_state\": \"5\",\n \"need_utd\": \"true\",\n \"utd_sceneid\": \"1000\",\n \"div\": \"PC1000\",\n \"addr_poi_merge\": \"true\",\n \"is_classify\": \"true\",\n \"zoom\": \"14\",\n \"longitude\": starbucks_data['longitude'],\n \"latitude\": starbucks_data['latitude'],\n \"range\": \"1000\",\n \"city\": city_info[1][0],\n \"keywords\": search_keyword,\n }\n\n for page_num in range(1, 3):\n payload['pagenum'] = page_num\n poi_list = request_amap_poi_info(payload, 'https://www.amap.com/place/' + starbucks_data['amap_key'])\n\n if not poi_list:\n print('request host list fail with %s' % page_num)\n continue\n\n for poi in poi_list:\n if not (poi.get('longitude', '') or poi.get('latitude', '') or starbucks_data['longitude'] or starbucks_data['latitude']):\n distance = None\n else:\n distance = geo_distance(poi.get('longitude', ''), poi.get('latitude', ''),starbucks_data['longitude'], starbucks_data['latitude'])\n\n data = {\n 'starbucks_key': starbucks_data['amap_key'],\n 'keyword': search_keyword,\n 'city': poi.get('cityname'),\n 'name': poi.get('name'),\n 'longitude': poi.get('longitude'),\n 'latitude': poi.get('latitude'),\n 'address': poi.get('address'),\n 'tel': poi.get('tel'),\n 'mean_price': '',\n 'distance': distance\n }\n domain_list = poi.get('domain_list')\n for domain in domain_list:\n if domain.get('name', '') == 'price':\n price_raw = domain.get('value', '')\n # price_raw = \"<font color='#90969a'>人均:</font><font color='#f84b57'>¥</font><font color='#f84b57'>114</font>\"\n try:\n data['mean_price'] = re.findall('<.*>人均:<.*>¥<.*>([0-9]+)</font>', price_raw)[0]\n except:\n data['mean_price'] = None\n break\n host_data.append(data)\n\n print('【%s】的【%s】的周边的【%s】菜系,第【%d】页爬取完毕' % (city_info[1], starbucks_data['name'], search_keyword, page_num))\n return host_data", "def stats_search(self, host):\n\n s = 
self.get_stats(host, 'search')\n\n data = {\n 'query_total': s['query_total'],\n 'fetch_time_in_millis': s['query_time_in_millis'],\n 'fetch_total': s['fetch_total'],\n 'query_time_in_millis': s['fetch_time_in_millis'],\n 'open_contexts': s['open_contexts'],\n 'fetch_current': s['fetch_current'],\n 'query_current': s['query_current']\n }\n\n return data", "def _nodeinfo_endpoint(host):\n zkclient = context.GLOBAL.zk.conn\n nodeinfo_zk_path = '{}/{}'.format(z.ENDPOINTS, 'root')\n for node in zkclient.get_children(nodeinfo_zk_path):\n if 'nodeinfo' in node and host in node:\n data, _metadata = zkclient.get(\n '{}/{}'.format(nodeinfo_zk_path, node)\n )\n return data.decode().split(':')", "def get_host_settings(self, p_host=\"localhost\"):\n columns = ['interval', 'last_update_utc', 'next_update_utc', 'unique_id']\n host_settings = util.read_hosts(p_host)\n return dict(zip(columns, host_settings))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a node entry definition if there is no lower depth definition. Raises RuntimeError if the depth matches.
def add_entry(self, key, value, depth): current = self.entries.get(key, None) if current is None or current.depth > depth: self.entries[key] = NodeEntry(key, value, depth) elif current.depth == depth: raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))
[ "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n\t\tif self._root is not None:\n\t\t\traise ValueError('Root Exists')\n\n\t\tself._size = 1\n\t\tself._root = self._Node(e)\n\t\treturn self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def _add(self, root, element, currentDepth):\n # When adding an element from the actual node, all elements less important\n # than the actual node are ALWAYS in the right branch, but the most importants\n # are on the left branch\n if root.data < element:\n if root.left == None:\n root.left = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.left\n else:\n # print \"Going to left branch at depth\", currentDepth\n return self._add(root.left, element, currentDepth + 1)\n else:\n if root.right == None:\n # print \"Adding new right leave\", element\n root.right = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.right\n else:\n # print \"Going to right branch at depth\", currentDepth\n return self._add(root.right, element, currentDepth + 1)", "def add_child(self, definition):\n self.children.append(definition)", "def build_node_from_entry(self, entry):\n if entry is None:\n mess = \"Object browser entry expected, %s found\" % entry\n raise ValueError(mess)\n node = Node(self.sstd, self.sbld, entry)\n sobj = node.get_sobj()\n if sobj.GetID() == sobj.GetFatherComponent().GetID():\n node = Node(self.sstd, self.sbld, entry, is_root=True)\n return node", "def level_order_insert(self, key):\n\n if self.root is None:\n self.root = Node(key)\n return\n\n q = queue.Queue()\n q.put(self.root)\n while q.qsize()>0:\n e = q.get()\n if e.left == None:\n e.left = Node(key)\n break\n else:\n q.put(e.left)\n if e.right == None:\n e.right = Node(key)\n break\n else:\n q.put(e.right)", "def _check_for_node_def(\n isy_data: IsyData, node: Node, single_platform: Platform | None = None\n) -> bool:\n if not hasattr(node, \"node_def_id\") or node.node_def_id is None:\n # Node doesn't have a node_def (pre 5.0 firmware most likely)\n return False\n\n node_def_id = node.node_def_id\n\n platforms = NODE_PLATFORMS if not single_platform else [single_platform]\n for platform in platforms:\n if node_def_id in NODE_FILTERS[platform][FILTER_NODE_DEF_ID]:\n isy_data.nodes[platform].append(node)\n return True\n\n return False", "def add_line_info(root_node):\n class AddLineNumbers(BottomUpVisitor):\n def __init__(self):\n BottomUpVisitor.__init__(self, strict_line_order=True, make_unique=True)\n def visit_one_node(self, node, lineno=None):\n# print(node, lineno, getattr(node, 'lineno', None))\n if not hasattr(node, 'lineno'):\n node.lineno = lineno\n else:\n if node.lineno != lineno:\n print(node, lineno, 
node.lineno)\n print(astor.dump(root_node))\n assert False\n BottomUpVisitor.visit_one_node(self, node, lineno)\n AddLineNumbers().visit(root_node)", "def _add_sitemap_tree_element(self, element_class, element_depth, \n element_parent, **kwargs):\n\n # Create new sitemap element corresponding to given element class\n if element_class is HeadlineElement:\n element_headline = kwargs['headline']\n new_element = HeadlineElement(element_headline, element_depth, \n element_parent)\n elif element_class is TextReferenceElement:\n element_reference = kwargs['reference']\n element_title = kwargs['title']\n new_element = TextReferenceElement(element_reference, element_title, \n element_depth, element_parent)\n \n # Check whether added element is root of sitemap tree or not\n if element_parent:\n element_parent.append_child(new_element)\n else:\n self._sitemap_tree = new_element\n return new_element", "def add_edge(self, parent, child):\r\n if child not in self.undeclared_nodes:\r\n raise LookupError(\"Node does not exist in undeclared nodes\")\r\n tree_node_parent = self.find_node(parent)\r\n tree_node_child = TreeNode(child)\r\n tree_node_child.parent = tree_node_parent\r\n tree_node_parent.children.append(tree_node_child)\r\n self.undeclared_nodes.remove(child)", "def add_definition(self, definition):\n for existing in self._iter_definitions():\n if existing.is_match(definition):\n raise ValueError('New definition conflicts with existing '\n 'definition in scope.')\n else:\n self._definitions.append(definition)\n self._add_to_tree(definition)\n definition._scope = self", "def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def _get_or_add_definition(self):\n # ---note this method is called recursively to access inherited definitions---\n # ---case-1: definition is not inherited---\n if self._has_definition:\n return self._definition\n # ---case-2: definition is inherited and belongs to second-or-later section---\n prior_headerfooter = self._prior_headerfooter\n if prior_headerfooter:\n return prior_headerfooter._get_or_add_definition()\n # ---case-3: definition is inherited, but belongs to first section---\n return self._add_definition()", "def set_recursion_depth_entry(self, recursion_depth):\n self.entries[\"ent_recursion_depth\"].delete(0, END)\n self.entries[\"ent_recursion_depth\"].insert(\n 0, str(recursion_depth))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds all the entries in objs at the current depth.
def add_entries(self, objs, keyname, valuename, depth):
    add_entry = self.add_entry
    for obj in objs:
        key = getattr(obj, keyname, None)
        if key is None:
            continue
        value = getattr(obj, valuename, None)
        add_entry(key, value, depth)
[ "def _add_children(tree, git_objects, git_root):\n for line in cat_file(tree.sha, git_root, CatFileOption.PRETTY).split(\"\\n\"):\n *_, sha, name = line.strip().split()\n child = git_objects[sha]\n tree.add_child(name, child)", "def add(self, fetchables, depth=1):\n if fetchables:\n if isinstance(fetchables, collections.Sequence):\n for fetchable in fetchables:\n self.add(fetchable, depth)\n else:\n log.debug(\"Adding to queue: %s (depth=%s)\", fetchables, depth)\n self.q.append((fetchables, depth))", "def add_all(self, all_of_them):\n if type(all_of_them) is not AllChildren:\n raise TypeError(\n \"%s != AllCheldren: db.add_all can only add \"\n \"AllChildren objects to the database.\" % type(all_of_them)\n )\n\n # Start with Children\n children = all_of_them.get_children()\n for child in children:\n # We do not save the return value as it\n # is not needed for anything here.\n self.add_or_update_child(child)\n\n # Finish with SiblingGroups\n sgroups = all_of_them.get_siblings()\n for sgroup in sgroups:\n # We do not save the return value as it\n # is not needed for anything here.\n self.add_or_update_sibling_group(sgroup)", "def save_all(cls, objs):\n if not objs:\n return\n return cls()._deep_save(objs, [])", "def add_prebuilt_objects(self):\n\n # global prebuilt_objects\n # I can't remember why I need to reload this ...\n import prebuilt_objects\n # importlib.reload(prebuilt_objects)\n\n for obj_list in [\n prebuilt_objects.users,\n prebuilt_objects.game_worlds,\n # prebuilt_objects.all_abilities,\n prebuilt_objects.all_store_items,\n prebuilt_objects.all_marketplace_items,\n prebuilt_objects.all_quests,\n prebuilt_objects.all_specializations,\n prebuilt_objects.all_forums,\n prebuilt_objects.all_monsters]:\n for obj in obj_list:\n self.session.add(obj)\n if isinstance(obj, models.Account):\n obj.password = services.secrets.encrypt(obj.password)\n obj.timestamp = datetime.datetime.utcnow()\n self.update()\n default_quest_paths = self.get_default_quest_paths()\n for hero in self.session.query(models.Hero).all():\n hero.journal.quest_paths = default_quest_paths\n self.update()", "def add_all(self, objects):\n self.lock.acquire()\n self.__Session.add_all(objects)\n self.__Session.commit()\n self.lock.release()", "def add_objects(self, environment_objects):\n\n if not isinstance(environment_objects, list):\n environment_objects = [environment_objects]\n\n for environment_object in environment_objects:\n if environment_object.total_cars is not None:\n self.total_car_percent += environment_object.total_cars\n\n environment_object.total_car_percent = self.total_car_percent\n environment_object.set_environment(self)\n\n if not self.environment_objects.get(type(environment_object)):\n self.environment_objects[type(environment_object)] = []\n\n self.environment_objects[type(environment_object)].append(\n environment_object)\n\n self.get_all_unique_nodes()\n\n return self", "def _add_all_to_tree(elms, trie):\n for elm in elms:\n tokens = tokenize(elm.name)\n for token in tokens:\n trie.add(token, elm)", "def _add_objects(self):\n modname = sys.modules[__name__]\n for name, cls in inspect.getmembers(modname, self._is_obj_class):\n self._sub_classes[name] = cls", "def add(self, *objs):\n for obj in objs:\n if isinstance(obj, BrianObject):\n if obj._network is not None:\n raise RuntimeError('%s has already been simulated, cannot '\n 'add it to the network. 
If you were '\n 'trying to remove and add an object to '\n 'temporarily stop it from being run, '\n 'set its active flag to False instead.'\n % obj.name)\n if obj not in self.objects: # Don't include objects twice\n self.objects.append(obj)\n self.add(obj.contained_objects)\n else:\n # allow adding values from dictionaries\n if isinstance(obj, Mapping):\n self.add(*list(obj.values()))\n else:\n try:\n for o in obj:\n # The following \"if\" looks silly but avoids an infinite\n # recursion if a string is provided as an argument\n # (which might occur during testing)\n if o is obj:\n raise TypeError()\n self.add(o)\n except TypeError:\n raise TypeError(\"Can only add objects of type BrianObject, \"\n \"or containers of such objects to Network\")", "def add_scene_objects(self, obj_tid_catids):\n self._scene_objects.extend(obj_tid_catids)\n # for oid, scene_object in scene.objects.items():\n # if scene_object.label in ('book', 'wall', 'floor'):\n # self._ignored_cats.add(scene_object.label)\n # continue\n # try:\n # cat = TRANSLATIONS_CATEGORIES[scene_object.label]\n # except KeyError:\n # cat = scene_object.label\n #\n # try:\n # cat_id = CATEGORIES[cat]\n # self._scene_objects.append((scene_object, idx_t, cat_id))\n # except KeyError:\n # self._ignored_cats.add(cat)", "def addChildren(self,**kwargs):\n self.__children.update(**kwargs)\n self.childrenParamsUpdate()", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def register_recursive(objects):\n for obj in objects:\n if isclass(obj):\n bpy.utils.register_class(obj)\n elif hasattr(obj, \"register\"):\n obj.register()\n elif hasattr(obj, \"REGISTER_CLASSES\"):\n register_recursive(obj.REGISTER_CLASSES)\n else:\n print(f\"Warning: Failed to find anything to register for '{obj}'\")", "def increaseDepth(self):\n\t\tself.deepestChildren = []", "def add_children(self, children):\n for child in children:\n self.add_child(child)", "def add_objects(self, objects):\n requests = []\n for obj in objects:\n requests.append({\"action\": \"addObject\", \"body\": obj})\n request = {\"requests\": requests}\n return self.batch(request)", "def all_children(self):\n stack = [self] # list of containers, including self, to add and later parse for children\n ret = list()\n self.__obj = LabelledDict(label='all_objects', key_attr='object_id')\n while len(stack): # search until there's nothing in the list\n n = stack.pop()\n ret.append(n)\n if n.object_id is not None:\n self.__obj[n.object_id] = n\n else: # pragma: no cover\n # warn that a child does not have an object_id, which is unusual\n warn('%s \"%s\" does not have an object_id' % (type(n).__class__, n.name))\n if hasattr(n, 'children'):\n for c in n.children:\n stack.append(c)\n return ret", "def collect(self, objs, *args, **kwargs):\r\n if kwargs.get(\"source\", None) is None:\r\n self.root_objs = objs\r\n super(SoftDeleteCollector, self).collect(objs, *args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if a sysfs_gpu_name file indicates an AMD device
def _is_amd(sysfs_gpu_name):
    with open(sysfs_gpu_name) as src:
        return src.read().strip() == 'amdgpu'
[ "def is_GPU_available():\n code = os.system(\"nvidia-smi\")\n return code == 0", "def is_gpu(xpu):\n return xpu.main_device is not None\n # return 'gpu' in xpu.mode", "def find_available_device():\r\n ids = ['h', 'i', 'j', 'k', 'l', 'm', 'n']\r\n for device_id in ids:\r\n if not os.path.lexists('/dev/sd'+device_id) and not os.path.lexists('/dev/xvd'+device_id):\r\n return device_id\r\n raise \"No available device\"", "def is_gpu_available() -> bool:\n return True", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def is_gpu_device(self, device):\n return device in self._gpu_devices", "def is_cuda_device(device):\n\treturn 'cuda' in str(device)", "def _amd_index(sysfs_gpu_name):\n drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]\n return drop_prefix.split('/')[0]", "def __is_blacklisted_blockdev(dev_name):\n if dev_name.startswith(\"ram\") or dev_name.startswith(\"fd\"):\n return True\n\n if flags.installer_mode:\n if any(re.search(expr, dev_name) for expr in INSTALLER_BLACKLIST):\n return True\n\n if os.path.exists(\"/sys/class/block/%s/device/model\" %(dev_name,)):\n model = open(\"/sys/class/block/%s/device/model\" %(dev_name,)).read()\n for bad in (\"IBM *STMF KERNEL\", \"SCEI Flash-5\", \"DGC LUNZ\"):\n if model.find(bad) != -1:\n log.info(\"ignoring %s with model %s\", dev_name, model)\n return True\n\n return False", "def device_is_dm_anaconda(info):\n return device_dm_subsystem_match(info, \"anaconda\")", "def is_magisk_su():\n stdout, _ = adb_execute_and_get_output([\"su\", \"--help\"])\n return \"MagiskSU\" in stdout", "def own_gpu_expansion_slot(self):\n return self.get_product_name().endswith('Graphics Exp')", "def detect_device_type():\n if glob.glob('/proc/acpi/battery/*'):\n # Linux: If we have a battery, assume Laptop\n return 'laptop'\n\n return 'desktop'", "def isa(device_name):\n\n if not device_name:\n raise DmDeviceError(_(\"No device name given.\"))\n if device_name != os.path.basename(device_name):\n msg = _(\"Invalid device name %r given.\") % (device_name)\n raise DmDeviceError(msg)\n\n bd_dir = os.sep + os.path.join('sys', 'block', device_name)\n if not os.path.exists(bd_dir):\n return False\n\n dm_dir = os.path.join(bd_dir, 'dm')\n if 
not os.path.exists(dm_dir):\n return False\n\n return True", "def _CheckMsrKernelModule():\n proc = subprocess.Popen('/sbin/lsmod', stdout=subprocess.PIPE)\n stdout = proc.communicate()[0]\n ret = proc.wait()\n if ret != 0:\n raise OSError('lsmod failed')\n\n if not any([line.startswith('msr ') for line in stdout.splitlines()]):\n print('Error: MSR module not loaded.')\n return False\n\n return True", "def test_change_name_of_the_devicefalse():", "def _IsDevice(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(file_attribute_flags & pyfsntfs.file_attribute_flags.DEVICE)", "def is_on_device() -> bool:\n return platform.machine() != 'x86_64'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the gpu index given a sysfs_gpu_name
def _amd_index(sysfs_gpu_name):
    drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]
    return drop_prefix.split('/')[0]
[ "def choose_gpu():\r\n # query GPU memory and save the result in `tmp`\r\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\r\n # read the file `tmp` to get a gpu memory list\r\n memory_gpu = [int(x.split()[2]) for x in open('tmp','r').readlines()]\r\n log.logger.info('memory_gpu: {}'.format(memory_gpu))\r\n # get the id of the gpu with the most memory\r\n gpu_id = str(np.argmax(memory_gpu))\r\n # remove the file `tmp`\r\n os.system('rm tmp')\r\n return gpu_id", "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def deviceid(gpu):\n\n # Return if this is already a torch device\n # pylint: disable=E1101\n if isinstance(gpu, torch.device):\n return gpu\n\n # Always return -1 if gpu is None or an accelerator device is unavailable\n if gpu is None or not Models.hasaccelerator():\n return -1\n\n # Default to device 0 if gpu is True and not otherwise specified\n if isinstance(gpu, bool):\n return 0 if gpu else -1\n\n # Return gpu as device id if gpu flag is an int\n return int(gpu)", "def try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def get_cuda_device(minor_idx):\n\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return 0\n\n for i in range(num_devices):\n output = subprocess.check_output([\"nvidia-smi\", '-q', '-i', str(i)])\n output_list = output.decode(\"utf-8\").split('\\n')\n output_list = [item for item in output_list if 'Minor' in item]\n num = int(output_list[0].split(':')[-1])\n if num == minor_idx:\n return i\n return 0", "def GetGPU():\n return option['device_id']", "def get_device_index(name):\n dev_list = get_device_list()\n \n for i in range(0, len(dev_list)):\n if name == dev_list[i][\"name\"]:\n return i \n\n raise DRTError(\"Name: %s is not a known type of devices\" % name)", "def get_freer_gpu():\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n return np.argmax(memory_available)", "def _get_cuda_device_id(gpu_id, job_name, using_cpu_only):\n if using_cpu_only or job_name != 'worker':\n return ''\n cuda_visible_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if len(cuda_visible_devices.split(',')) > gpu_id:\n return cuda_visible_devices.split(',')[gpu_id]\n return cuda_visible_devices", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def get_gpu_ids():\n worker = global_worker\n worker.check_connected()\n return worker.get_resource_ids_for_resource(\n ray_constants.GPU, f\"^{ray_constants.GPU}_group_[0-9A-Za-z]+$\"\n )", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def _current_device_index(self) -> int:\n device = PArray._get_current_device()\n if device is None: # not called inside current task\n return 
self._coherence.owner\n elif device.architecture == cpu:\n return CPU_INDEX\n else:\n # assume GPU here, won't check device.architecture == gpu\n # to avoid import `gpu`, which is slow to setup.\n return device.index", "def auto_select_gpu():\n if HAS_NVML:\n pynvml.nvmlInit()\n deviceCount = pynvml.nvmlDeviceGetCount()\n largest_free_mem = 0\n largest_free_idx = 0\n for i in range(deviceCount):\n handle = pynvml.nvmlDeviceGetHandleByIndex(i)\n info = pynvml.nvmlDeviceGetMemoryInfo(handle)\n if info.free > largest_free_mem:\n largest_free_mem = info.free\n largest_free_idx = i\n pynvml.nvmlShutdown()\n largest_free_mem = largest_free_mem / 1024. / 1024. # Convert to MB\n if platform.node() == 'localhost.localdomain':\n if deviceCount == 3:\n idx_to_gpu_id = {0: '2', 1: '1', 2: '0'}\n elif deviceCount == 2:\n idx_to_gpu_id = {0: '1', 1: '0'}\n elif deviceCount == 1:\n idx_to_gpu_id = {0: '0'}\n else:\n idx_to_gpu_id = {0: ''}\n else:\n idx_to_gpu_id = {}\n for i in range(deviceCount):\n idx_to_gpu_id[i] = '{}'.format(i)\n\n gpu_id = idx_to_gpu_id[largest_free_idx]\n print('Using largest free memory GPU {} with free memory {}MB'.format(gpu_id, largest_free_mem))\n return gpu_id\n else:\n print('INFO: pynvml is not found, automatically select gpu is disabled!')\n return '0'", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def device_index(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_index\")", "def get_gpus():\n if platform.system() == \"Windows\":\n # If the platform is Windows and nvidia-smi\n # could not be found from the environment path,\n # try to find it from system drive with default installation path\n nvidia_smi = spawn.find_executable(\"nvidia-smi\")\n if nvidia_smi is None:\n nvidia_smi = (\n \"%s\\\\Program Files\\\\NVIDIA Corporation\\\\NVSMI\\\\nvidia-smi.exe\"\n % os.environ[\"systemdrive\"]\n )\n else:\n nvidia_smi = \"nvidia-smi\"\n\n cmd = (\n \"--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,\"\n \"driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu\"\n )\n formatting = \"--format=csv,noheader,nounits\"\n popen_cmds = [nvidia_smi, cmd, formatting]\n\n # Get index, uuid, processing and memory utilization and so on for all GPUs\n try:\n p = Popen(\n popen_cmds,\n stdout=PIPE,\n )\n stdout, stderr = p.communicate()\n except BaseException as be:\n print(\"exception while monitoring - \", str(be))\n sys.exit(1)\n\n output = stdout.decode(\"UTF-8\")\n # Parse output\n # split on operating system line break - Linux default is \"\\n\"\n lines = output.split(\"\\n\")\n lines = [l for l in lines if l != \"\"]\n num_devices = len(lines)\n temp = []\n for g in range(num_devices):\n line = lines[g]\n vals = line.split(\", \")\n for i in range(12):\n if i == 0:\n device_id = int(vals[i])\n elif i == 1:\n uuid = vals[i]\n elif i == 2:\n gpu_util = _safe_float_cast(vals[i]) / 100\n elif i == 3:\n mem_total = _safe_float_cast(vals[i])\n elif i == 4:\n mem_used = _safe_float_cast(vals[i])\n elif i == 5:\n mem_free = _safe_float_cast(vals[i])\n elif i == 6:\n driver = vals[i]\n elif i == 7:\n gpu_name = vals[i]\n elif i == 8:\n serial = vals[i]\n elif i == 9:\n display_active = vals[i]\n elif i == 10:\n display_mode = vals[i]\n elif i == 11:\n temp_gpu = _safe_float_cast(vals[i])\n gpu = GPU(\n 
device_id,\n uuid,\n gpu_util,\n mem_total,\n mem_used,\n mem_free,\n driver,\n gpu_name,\n serial,\n display_mode,\n display_active,\n temp_gpu,\n )\n # 每一次结果操作num_devices次\n temp.append(gpu)\n GPUs.append(temp)", "def get_num_gpus():\n try:\n gpu_info = subprocess.check_output([\"nvidia-smi\", \"--format=csv,noheader,nounits\", \"--query-gpu=name\"]).decode()\n gpu_info = gpu_info.split('\\n')\n except:\n return 0\n\n count = 0\n for line in gpu_info:\n if len(line) > 0:\n count += 1\n return count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configures logging. logging_config.json should have been placed in the directory AUTOMINE_LOG_DIR, to which this process must have read and write access
def _configure_logger():
    try:
        log_dir = os.environ['AUTOMINE_LOG_DIR']
        log_name = _log_name()
        cfg_path = os.path.join(log_dir, 'logging_config.json')
        with open(cfg_path) as src:
            cfg = json.load(src)
            handlers = cfg.get('handlers')
            for handler in iter(handlers.values()):
                filename = handler.get('filename')
                if filename:
                    filename = filename.replace('{{AUTOMINE_LOG_DIR}}', log_dir)
                    filename = filename.replace('{{__name__}}', log_name)
                    handler['filename'] = filename
            loggers = cfg.get('loggers')
            if '__name__' in loggers:
                loggers[log_name] = loggers.pop('__name__')

            # add logging to the console if env var is set
            log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ
            if log_to_console and 'console' in handlers:
                logger_handlers = loggers[log_name].get('handlers')
                if logger_handlers:
                    logger_handlers.append('console')
            dictConfig(cfg)
    except Exception as err:  # pylint: disable=broad-except
        logging.basicConfig()
        raise err
[ "def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)", "def _configure_logs(self):\n logging.basicConfig(filename=config.OUTPUT_FILES_PATH + \n \"/\" + config.LOG_FILENAME, filemode='w', level=config.LOG_LEVEL,\n format='%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)s - %(message)s')\n self._utils.warn_me(\"INFO\", \"Configured the logging file with level '\" + config.LOG_LEVEL + \"' here:\" + config.OUTPUT_FILES_PATH + \"/\" + config.LOG_FILENAME)", "def initialize_logging(self):\n logging_config_path = self.pyleus_config.get('logging_config_path')\n if logging_config_path:\n logging.config.fileConfig(logging_config_path)\n elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):\n logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)", "def _configure_logging(self):\n pass", "def setup_logging():\n with open(DEFAUL_LOGGING_CONFIG_FILEPATH) as config_fin:\n logging.config.dictConfig(yaml.safe_load(config_fin))", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def configure_logger():\n log_dir = os.path.join(_workspace_dir(), \"logs\")\n if os.path.exists(log_dir):\n shutil.rmtree(log_dir)\n os.makedirs(log_dir)\n\n global_log = logging.getLogger()\n global_log.setLevel(logging.DEBUG)\n\n verbose_format = \"%(asctime)s(%(levelname)s->%(module)s):%(message)s\"\n date_format = \"%Y-%m-%d %H:%M:%S\"\n fmt = logging.Formatter(verbose_format, date_format)\n file_handler = logging.FileHandler(\n os.path.join(log_dir, \"tests.log\"),\n mode=\"w\")\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(fmt)\n global_log.addHandler(file_handler)", "def setup_logging(name=''):\n\n log_config = os.environ.get('LOG_CONFIG', name)\n\n # is no logging desired ?\n if log_config is None or log_config.lower() is 'none': \n logger = logging.getLogger('')\n logger.addHandler(logging.NullHandler())\n return\n\n if os.path.isfile(log_config): # found a log config file?\n suffix = os.path.splitext(log_config)[1].lower()\n if suffix in ['.ini', '.conf']:\n logging.config.fileConfig(log_config)\n elif suffix == '.json':\n with open(log_config) as fin:\n cfg = json.load(fin)\n logging.config.dictConfig(cfg)\n return\n\n try: # can I find a log config module ?\n cfg_module = minibelt.import_from_path(log_config)\n if hasattr(cfg_module, 'setup_logging'):\n cfg_module.setup_logging()\n except:\n # basic logging of warnings to stderr?\n logging.basicConfig(\n stream=sys.stdout,\n #level=logging.WARNING,\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)-8s %(name)7s: %(message)s\",\n 
datefmt=\"%H:%M:%S\",\n )", "def setup_logger():\n log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n log_file = os.path.join(log_dir, 'management.log')\n project_logger.setup_logger(log_file)", "def init_logging():\r\n logging.config.dictConfig(get_config()['logging'])", "def test_logging_config(self):\n topdir = os.path.dirname(os.path.dirname(__file__))\n # logging config from default\n os.system('rm %s/logging.conf' % topdir)\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)\n # logging config from file\n os.system('cp %s/logging.conf.sample %s/logging.conf' %\n (topdir, topdir))\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)", "def setup_logging(\n module,\n default_level=logging.INFO,\n env_key='LOG_CFG',\n logpath=os.getcwd(),\n config_path=None\n):\n\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")\n fpath = os.path.join(logpath, module, timestamp)\n\n path = config_path if config_path is not None else os.getenv(env_key, None)\n if path is not None and os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n for h in config['handlers'].values():\n if h['class'] == 'logging.FileHandler':\n h['filename'] = os.path.join(logpath, module, timestamp, h['filename'])\n touch(h['filename'])\n for f in config['filters'].values():\n if '()' in f:\n f['()'] = globals()[f['()']]\n logging.config.dictConfig(config)\n else:\n lpath=os.path.join(logpath, timestamp)\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n logging.basicConfig(level=default_level, filename=os.path.join(lpath,\"base.log\"))", "def configure_logging(logdir=None):\n logconfig = LOGCONFIG_DICT.copy()\n if logdir:\n debugfile = os.path.join(logdir, DEBUGFILE)\n logconfig['handlers']['debugfile']['filename'] = debugfile\n errorfile = os.path.join(logdir, ERRORFILE)\n logconfig['handlers']['errorfile']['filename'] = errorfile\n\n logging.config.dictConfig(logconfig)", "def test_logging_config_file(self, monkeypatch):\n # We still want the Formatter to be configured.\n assert logging.Formatter.converter == time.gmtime\n assert logging.Formatter.default_time_format == '%Y-%m-%dT%H:%M:%S'\n assert logging.Formatter.default_msec_format == '%s.%03d'\n\n # Set NETDUMPLINGS_LOGGING_CONFIG to point to a test logging config.\n logging_config_file = 'tests/data/logging.json'\n monkeypatch.setenv('NETDUMPLINGS_LOGGING_CONFIG', logging_config_file)\n\n configure_logging()\n\n # The test config file sets all the loggers to ERROR.\n assert logging.getLogger('netdumplings').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplinghub').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingkitchen').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingeater').level == logging.ERROR", "def _init_logging_config(self):\n if self.use_db and not self.opt['is_debug']:\n shared_utils.disable_logging()\n else:\n shared_utils.set_is_debug(self.opt['is_debug'])\n shared_utils.set_log_level(self.opt['log_level'])", "def setup_logging(conf_file_locations):\n actual_log_conf_location = None\n for location in conf_file_locations:\n if os.path.exists(os.path.expanduser(location)):\n actual_log_conf_location = location\n break\n\n if actual_log_conf_location != None:\n 
print(actual_log_conf_location)\n logging.config.fileConfig(os.path.expanduser(actual_log_conf_location))\n else:\n print(\"Unable to locate logging configuration in the \" + \\\n \"following locations:\")\n for location in conf_file_locations:\n print(\" \" + os.path.abspath(os.path.expanduser(location)))", "def config(component, file = cfg.get(\"CLUSTER\", \"mistk.log.config\", DEFAULT_CFG_FILE)):\n with open(file) as reader:\n log_config = json.load(reader)\n \n log_path = log_config['handlers']['file_handler']['filename']\n log_name = component + '.log'\n log_config['handlers']['file_handler']['filename'] = os.path.join(log_path, log_name)\n logging.config.dictConfig(log_config)", "def initLog(config):\n logConfig[\"logFolder\"] = config.get(\"General\", \"logFolder\")\n logConfig[\"logFilePrefix\"] = config.get(\"General\", \"logFilePrefix\")\n logConfig[\"debugLogs\"] = config.get(\"General\", \"debugLogs\").lower() == \"true\"\n\n if not os.path.exists(logConfig[\"logFolder\"]):\n os.makedirs(logConfig[\"logFolder\"])", "def configuration_of_the_logs():\n log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')\n\n #todo:#Check to see if the folder exist\n my_path = path.dirname(path.realpath(__file__))\n logFile = my_path + '/logs/logs.txt'\n\n #The logs.txt file can't be more than 5MB\n my_handler = RotatingFileHandler(logFile, mode='a', maxBytes=5*1024*1024,\n backupCount=2, encoding=None, delay=0)\n my_handler.setFormatter(log_formatter)\n my_handler.setLevel(logging.INFO)\n\n app_log = logging.getLogger('root')\n #app_log.setLevel(logging.INFO)\n app_log.setLevel(logging.INFO)\n\n app_log.addHandler(my_handler)\n #app_log.info('configuraring the logs')\n\n return app_log" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint to display create item page.
def create_item_page():
    catagories = [c.name for c in Catagory.fetch_all()]
    return render_template('add_item.html', catagories=catagories, values={})
[ "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def create(self):\n self.action = 'create'", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def create_item(self, user: User, **kwargs) -> None:", "async def item_create(item_in: ItemCreate, db: Session = Depends(get_db)):\n return create_item(db=db, item=item_in)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def post(self):\n 
app.logger.info(\"Request to create an inventory item\")\n inventory_item = InventoryItem()\n app.logger.info(\"Payload = %s\", api.payload)\n inventory_item.deserialize(api.payload)\n inventory_item.create()\n message = inventory_item.serialize()\n location_url = api.url_for(\n InventoryItemResource, inventory_item_id=inventory_item.id, _external=True\n )\n app.logger.info(\"Inventory item with ID [%s] created.\", inventory_item.id)\n return (\n inventory_item.serialize(),\n status.HTTP_201_CREATED,\n {\"Location\": location_url},\n )", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def new_listing():\n\n return render_template(\"add_listing.html\")", "def create_item(request, app_label=None, model_name=None):\n\n if not model_name:\n raise ImproperlyConfigured\n model_name = model_name.lower()\n\n mod = None\n if app_label is None:\n models = ContentType.objects.filter(model=model_name)\n if models.count() == 0:\n raise Http404 # TODO: Throw better, more descriptive error\n elif models.count() == 1:\n mod = models.first().model_class()\n else: # models.count() > 1:\n # TODO: make this template\n return render(request, \"aristotle_mdr/ambiguous_create_request.html\", {'models': models})\n else:\n try:\n mod = ContentType.objects.get(app_label=app_label, model=model_name).model_class()\n except ObjectDoesNotExist:\n raise Http404 # TODO: Throw better, more descriptive error\n\n class DynamicAristotleWizard(ConceptWizard):\n model = mod\n return DynamicAristotleWizard.as_view()(request)", "def test_api_can_create_shoppingitem(self):\n response = self.client.post(\n reverse('shoppingitems', args=[1]), self.shoppingitem_data, format=\"json\"\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def addItem():\r\n form = AddForm()\r\n\r\n if form.validate_on_submit():\r\n item = Item(name=bleach.clean(form.name.data),\r\n description=bleach.clean(form.description.data),\r\n owner_id=int(current_user.get_id()),\r\n category_id=int(form.category.data))\r\n\r\n db.session.add(item)\r\n db.session.commit()\r\n return redirect(url_for('main.dashboard'))\r\n\r\n return render_template('main/addItem.html', form=form)", "def create(self, request, title=None, items=None, *args, **kwargs):\n\n checklist_template = ChecklistTemplate.objects.create(\n title=title,\n owner=request.user,\n items=items\n )\n\n return 201, {\n self.item_result_key: checklist_template\n }", "def newActionItem():\n if request.method == 'POST':\n # Connect to the database\n con = connect()\n Base.metadata.bind = con\n # Creates a session\n DBSession = sessionmaker(bind=con)\n dbsession = DBSession()\n\n user_id = getUserID(session['email'])\n\n actions = Actions(date_time=request.form['date_time'],\n finding=request.form['finding'],\n corrective_action=request.form['corrective_action'],\n due_date=request.form['due_date'],\n open_close='t',\n user_id=user_id)\n dbsession.add(actions)\n dbsession.commit()\n\n return redirect(url_for('actions'))\n else:\n user_profile = (session['username'], session['picture'])\n return render_template('actions_new.html', user_profile=user_profile)", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = 
session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def goto_create(self):\n\n self.create.click()", "def goto_create(self):\n\n return self.create.click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post endpoint to create an item. If the form is invalid, the create item page is returned with the errors displayed; otherwise the item is created and the request is redirected to the item page.
def create_item():
    name = request.form['name']
    catagory = request.form['catagory']
    description = request.form['description']
    errors = form_errors(request.form)
    if errors:
        catagories = [c.name for c in Catagory.fetch_all()]
        values = {
            'name': name, 'catagory': catagory, 'description': description
        }
        return render_template(
            'add_item.html',
            catagories=catagories,
            values=values,
            errors=errors
        )
    Item.create(name, catagory_name=catagory, description=description)
    return redirect(url_for(
        'read_item', catagory_name=catagory, item_name=name
    ))
[ "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def addItem():\r\n form = AddForm()\r\n\r\n if form.validate_on_submit():\r\n item = Item(name=bleach.clean(form.name.data),\r\n description=bleach.clean(form.description.data),\r\n owner_id=int(current_user.get_id()),\r\n category_id=int(form.category.data))\r\n\r\n db.session.add(item)\r\n db.session.commit()\r\n return redirect(url_for('main.dashboard'))\r\n\r\n return render_template('main/addItem.html', form=form)", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def post(self, request, format=None):\n item = request.data.get('shopping_item', None)\n\n if not item:\n return Response(status=403)\n\n new_item = ShoppingItem.objects.create(name=item)\n serializer = ShoppingItemSerializer(new_item)\n\n return Response(serializer.data)", "def add_item():\n\n\n add_item_form = AddItemForm()\n\n categories = Category.objects()\n\n add_item_form.category.choices = [(category.value, category.label) for category in categories]\n\n if add_item_form.validate_on_submit():\n\n title = add_item_form.title.data\n description = add_item_form.description.data\n price = add_item_form.price.data\n category = add_item_form.category.data\n\n new_item = Item(user = session['user']['id'] , title = title, description = description, price = price, category = category)\n \n new_item.save()\n\n flash(\"Your item has been successfully added.\")\n\n return redirect(url_for('home.home'))\n\n return render_template(\"item/add-item.html\", form = 
add_item_form)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def addItem(name):\n # Store named category and the logged in user\n category = Category.query.filter_by(name=name).first_or_404()\n user = Users.query.filter_by(\n id=login_session['users_id']).first_or_404()\n # Verify that the logged in user is creator or admin\n if category.users_id != login_session['users_id'] and not user.admin:\n flash(' You are not authorized add items to that category.')\n return redirect(url_for('category.showCategory', name=name))\n # Initiate the form.\n form = ItemForm()\n # On POST of a valid form, add the new item.\n if form.validate_on_submit():\n item = Item(\n form.name.data, form.description.data, form.amazon_asin.data,\n form.picture.data, category.id, login_session['users_id'])\n # Check if there is a amazon url, and if so extract asin\n if form.amazon_url.data is not None:\n asin = re.search(\"[A-Z0-9]{10}\", form.amazon_url.data)\n if asin:\n item.amazon_asin = asin.group(0)\n db.session.add(item)\n db.session.commit()\n flash('New Item %s Successfully Created' % item.name)\n # Log new item\n current_app.logger.info('New Item %s Created on %s' % (\n item.name, str(item.dateCreated)))\n return redirect(url_for('category.showHome'))\n else:\n return render_template('newItem.html', form=form, category=category)", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def post(self, name):\n data = Item.parser.parse_args()\n\n item = ItemModel.find_item(name)\n\n if item:\n return {\"message\": \"item already exist\"}, 403\n\n new_item = ItemModel(name, **data)\n new_item.save_to_db()\n\n return new_item.json(), 201", "def post():\n\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n return redirect(url_for('index'))\n return render_template('new.html', form=form)", "def post_item(self, data):\n\n url = reverse(\"item-list\")\n response = self.client.post(url, data, format=\"json\")\n return response", "def stock_submit():\n item = {\n 'title': request.form.get('title'),\n 'description': request.form.get('description'),\n 'cost': request.form.get('cost')\n }\n stock.insert_one(item)\n return redirect(url_for('stock_index'))", "def create_post():\n form = PostForm()\n time = datetime.utcnow()\n if form.validate_on_submit():\n try:\n post = Post(user_id=current_user.id, url=form.url.data, body=form.body.data)\n db.session.add(post)\n db.session.commit()\n flash(\"Congratulations, you have successfully created a post!\")\n return redirect(url_for(\"index\"))\n except:\n flash(\"Sorry, there was an error creating your post!\")\n return redirect(url_for(\"index\"))\n return render_template(\"create.html\", title=\"Create Post\", form=form)", "def post(self, name):\n row = ItemModel.get_item(name)\n if row:\n return {\"message\": \"There is another item with this name\"}, 422\n\n data = Item.parser.parse_args()\n item = ItemModel(name, data[\"price\"])\n try:\n item.save_to_db()\n except:\n return {\"message\": \"An error ocurred inserting the item.\"}, 500\n return item.json(), 200", "def newActionItem():\n if request.method == 'POST':\n # Connect to the database\n con = connect()\n Base.metadata.bind = con\n # Creates a session\n DBSession = sessionmaker(bind=con)\n dbsession = DBSession()\n\n user_id = 
getUserID(session['email'])\n\n actions = Actions(date_time=request.form['date_time'],\n finding=request.form['finding'],\n corrective_action=request.form['corrective_action'],\n due_date=request.form['due_date'],\n open_close='t',\n user_id=user_id)\n dbsession.add(actions)\n dbsession.commit()\n\n return redirect(url_for('actions'))\n else:\n user_profile = (session['username'], session['picture'])\n return render_template('actions_new.html', user_profile=user_profile)", "def test_api_can_create_shoppingitem(self):\n response = self.client.post(\n reverse('shoppingitems', args=[1]), self.shoppingitem_data, format=\"json\"\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint to display update item page.
def update_item_page(item_name, catagory_name):
    item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)
    catagories = [c.name for c in Catagory.fetch_all()]
    return render_template(
        'edit_item.html',
        catagories=catagories,
        values={
            'name': item.name,
            'catagory': item.catagory_name,
            'description': item.description
        },
    )
[ "def update_item(self, item_form):\n pass", "def editItem(id):\r\n item = Item.query.filter_by(id=id).first()\r\n\r\n # Abort if logged in user is not the owner of the page\r\n if int(current_user.get_id()) != item.owner_id:\r\n abort(403);\r\n\r\n form = EditForm(id=id, name=item.name, description=item.description)\r\n if form.validate_on_submit():\r\n item.name = bleach.clean(form.name.data)\r\n item.description = bleach.clean(form.description.data)\r\n\r\n db.session.add(item)\r\n db.session.commit()\r\n\r\n return redirect(url_for('main.dashboard'))\r\n\r\n return render_template('main/editItem.html', form=form)", "def update_item(item_id):\n name = flask.request.form['name']\n description = flask.request.form['description']\n\n item_to_update = session.query(Item).get(item_id)\n item_to_update.name = name\n item_to_update.description = description\n\n session.add(item_to_update)\n session.commit()", "def edit_item(**kwargs):\n db.session.add(kwargs[\"item\"])\n db.session.commit()\n if kwargs[\"is_bucket\"]:\n item_type = \"bucket list\"\n elif kwargs[\"is_item\"]:\n item_type = \"bucket list item\"\n\n message = {\"message\": \"Successfully updated \" + item_type + \".\"}\n response = marshal(kwargs[\"item\"], kwargs[\"serializer\"])\n response.update(message)\n return response", "def update(self, request, title=None, items=None, *args, **kwargs):\n\n try:\n checklist_template = self.get_object(request, args, **kwargs)\n except ObjectDoesNotExist:\n return DOES_NOT_EXIST\n\n if not self.has_modify_permissions(request, checklist_template):\n return self._no_access_error(request.user)\n\n checklist_template.title = title\n checklist_template.items = items\n checklist_template.save()\n\n checklist_template = ChecklistTemplate.objects.filter(\n pk=kwargs[self.uri_object_key]\n ).first()\n\n return 200, {\n self.item_result_key: checklist_template\n }", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = ItemForm()\n\n # If the form is validated, update the item with its data to the\n # database\n if form.validate_on_submit():\n\n # If the item name or sport has been modified, check that an\n # item with the same name and sport does not already exist, or\n # send a flash message and do not add the new item to the\n # database\n if form.name.data != item.name or form.sport.data != item.sport:\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n return redirect(url_for('items.edit_item',\n item_name=item_name))\n\n # If the item name or sport has not been modified, update all\n # details to the database, send a flash message, and redirect\n # to 'home'\n else:\n item.name = form.name.data\n item.sport = form.sport.data\n item.category = form.category.data\n item.description = form.description.data\n item.private = form.private.data\n db.session.commit()\n flash(f'\"{item.name}\" has been updated!', 'good')\n return redirect(url_for('items.item', item_name=item_name))\n\n # If the form is being requested, not submitted, pre-fill the form\n # with existing item data\n elif request.method 
== 'GET':\n form.name.data = item.name\n form.sport.data = item.sport\n form.category.data = item.category\n form.description.data = item.description\n form.private.data = item.private\n\n return render_template('edit_item.html', item=item, form=form)", "def item_edit_route(item_id):\n\n target_item = get_item(item_id)\n\n # checking access rights\n if target_item.owner != user_info()['id']:\n flash('Only owner can edit item')\n return redirect(url_for('item.item_route', item_id=item_id))\n\n if target_item is None:\n abort(404)\n\n # some protection\n csrf = generate_csrf_token()\n\n if request.method == 'POST':\n if csrf != request.form['csrf_token']:\n abort(403)\n else:\n update_item(item_id)\n flash('Item updated')\n # sending user to item page after edit is done\n return redirect(url_for('item.item_route', item_id=item_id))\n\n if request.method == 'GET':\n return render_template('item_edit.html', page={\n 'title': 'Edit item'\n }, user=user_info(), content={\n 'is_edit': True,\n 'csrf_token': csrf,\n 'item': target_item\n })", "def edit_item(request, item_id):\n if request.user.is_superuser:\n item = get_object_or_404(Product, pk=item_id)\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES, instance=item)\n if form.is_valid():\n form.save()\n messages.success(request, 'Item was successfully updated.')\n return redirect(reverse('item_info', args=[item.id]))\n else:\n messages.error(request, 'There was an issue updating the '\n 'item. Please make sure the form is valid.')\n else:\n form = ProductForm(instance=item)\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/edit_item.html'\n context = {\n 'form': form,\n 'item': item,\n }\n\n return render(request, template, context)", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def issueUpdateView(context, issue):\n\n user = context.get('user')\n\n if not user.has_perm('IssueTracker.can_change'):\n return \"\"\n\n if issue.item:\n item = issue.item.item\n \n args = {\n \"form\": forms.UpdateMachineForm(instance=item),\n }\n\n return render_to_string('issueUpdate.html', args, context)\n\n return \"\"", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def edit_item(item_id):\n\n edit_item_form = EditItemForm()\n\n categories = Category.objects()\n\n edit_item_form.category.choices = [(category.value, category.label) for category in categories]\n\n item = Item.objects(id = item_id).first()\n\n if request.method == \"GET\":\n\n edit_item_form.title.data = item.title\n edit_item_form.description.data = item.description\n edit_item_form.price.data = item.price\n edit_item_form.category.data = item.category\n\n\n if edit_item_form.validate_on_submit():\n\n item_user = item.user\n\n 
user = User.objects(id = session['user']['id']).first()\n\n if user == item_user :\n\n item.title = edit_item_form.title.data\n item.description = edit_item_form.description.data\n item.price = edit_item_form.price.data\n item.category = edit_item_form.category.data\n \n item.save()\n\n flash(\"Your item has been edited successfully.\")\n\n else:\n\n flash(\"Action Not Allowed: Editing an item you don't own.\")\n\n return redirect(url_for('home.home'))\n\n return render_template(\"item/edit-item.html\", form = edit_item_form)", "def update():\n try:\n shopping_list_id = request.args.get('id')\n request_data = request.get_json()\n response = ShoppingList.update(request_data, shopping_list_id)\n return response, 200\n except Exception as error:\n print('Error {}'.format(error))", "def edit(request):\n the_id = request.matchdict[\"id\"]\n entry = request.dbsession.query(Entry).get(the_id)\n if request.method == \"POST\":\n entry.title = request.POST[\"title\"]\n entry.body = request.POST[\"body\"]\n\n request.dbsession.flush()\n return HTTPFound(request.route_url(\"homepage\"))\n return {\"entry\": entry}", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def update(_id): \n pages_object = Pages(_id)\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Update page\n if request.method == 'POST':\n if pages_object.update():\n return redirect(url_for('pages.overview'))\n \n len_of_label = len(page['label'])\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/update.html'.format(MODULE_DIR), **locals())", "def edit_item(item_id):\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = session.query(Item).filter_by(id=item_id).one()\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n session.add(item)\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def update_activity_item(request):\n try:\n item_id = request.POST.get(\"item_id\")\n item_name = request.POST.get(\"item_name\")\n item_cost = request.POST.get(\"item_cost\")\n item_quantity = request.POST.get(\"item_quantity\")\n\n activity_item_obj = ActivityItem.objects.get(id=item_id)\n activity_item_obj.item = item_name\n activity_item_obj.item_cost = item_cost\n activity_item_obj.item_quantity = item_quantity\n activity_item_obj.save()\n\n activity_id = ActivityItem.objects.get(id=item_id).fk_activity_id\n activity_item_obj = ActivityItem.objects.filter(fk_activity_id=activity_id)\n render_string = render_to_string(\"activity_item_list.html\", {\"activity_item_obj\": activity_item_obj})\n return JsonResponse({\"render_string\": render_string, \"activity_id\": activity_id})\n except Exception:\n error_save(str(traceback.format_exc()))\n return redirect(\"error_handler_500\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return dict containing form validation errors for create / update item.
def form_errors(form): errors = {} max_name_length = Item.name.property.columns[0].type.length if not form.get('name', None): errors['name'] = 'Please enter a name.' elif len(form['name']) > max_name_length: errors['name'] = ( 'Name must be less than %s characters.' % max_name_length ) if not Catagory.exists(form.get('catagory', None)): errors['catagory'] = 'Not a valid catagory.' if not form.get('description', None): errors['description'] = 'Please enter a description.' return errors
[ "def compact_form_errors(form):\n errors = {}\n\n for name, validationerror in form.errors.as_data().items():\n errors[name] = [item.code for item in validationerror]\n\n return errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def render_errors(form):\n return {\n \"form\": form\n }", "def fill_form_errors(form, err):\n if isinstance(err.detail, dict):\n for field, msg in six.iteritems(err.detail):\n if field in form.fields:\n form.add_error(field, msg)\n elif field == api_settings.NON_FIELD_ERRORS_KEY:\n form.add_error(NON_FIELD_ERRORS, msg)\n else:\n form.add_error(NON_FIELD_ERRORS,\n _(\"No field '%(field)s': %(msg)s\" % {\n 'field': field, 'msg': msg}))", "def _errors_form(self, form):\n\n form_errors = form.errors\n for fields_error in form_errors.keys():\n for error in form_errors[fields_error]:\n messages.error(self.request, fields_error + \": \" + error, 'danger')", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def form_errors_json(form=None):\n if form:\n return mark_safe(dict(form.errors.items())) # noqa: S703, S308\n return {}", "def form_invalid_add_global_errormessages(self, form):\n if self.get_selected_items_form_attribute() in form.errors:\n errormessages = form.errors[self.get_selected_items_form_attribute()]\n for errormessage in errormessages:\n messages.error(self.request, errormessage)", "def describe_invalid_form(form):\n return dict((i.name, i.note) for i in form.inputs if i.note is not None)", "def get_template_errors(self):\n if not hasattr(self, u'_errors'):\n self._errors = MutliErrorDict()\n return self._errors", "def _validate_error(cls, item):\n if item.error and item.status_code not in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error: %s for job is not empty but '\n 'job status is %s' % (item.id, item.error, item.status_code))\n\n if not item.error and item.status_code in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error for job is empty but '\n 'job status is %s' % (item.id, item.status_code))", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def make_error_messages(self, request, form):\n for field_name, error_list in form.errors.items():\n for err_msg in error_list:\n messages.error(request=request,\n message=err_msg,\n extra_tags=field_name)", "def form_invalid(self, form):\r\n logger.error(\"%s data:\" % self.request.method)\r\n logger.error(pprint.pformat(form.data))\r\n errors = dict((k, v[0]) for k, v in form.errors.items())\r\n logger.error(unicode(errors))\r\n if form.non_field_errors():\r\n logger.error(form.non_field_errors())\r\n return HttpResponseBadRequest('form failed to validate')", "def form_invalid(self, form):\n\n self.logger.error(\"%s data:\" % 
self.request.method)\n self.logger.error(pprint.pformat(form.data))\n errors = dict((k, v[0]) for k, v in form.errors.items())\n self.logger.error(unicode(errors))\n if form.non_field_errors():\n self.logger.error(form.non_field_errors())\n return HttpResponseBadRequest('form failed to validate')", "async def form_invalid(self, form):\n self.write_json(\n data={'errors': form.errors},\n message=_('Validation failed.'),\n status_code=400\n )", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def get_field_errors(self, bound_field):\r\n errors = super(NgFormValidationMixin, self).get_field_errors(bound_field)\r\n identifier = format_html('{0}.{1}', self.form_name, self.add_prefix(bound_field.name))\r\n errors_function = '{0}_angular_errors'.format(bound_field.field.__class__.__name__)\r\n try:\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, errors_function)\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n except (TypeError, AttributeError):\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n errors.append(SafeTuple((identifier, '$dirty', '$valid', 'valid', ''))) # for valid fields\r\n errors.extend([SafeTuple((identifier, '$dirty', pe[0], 'invalid', force_text(pe[1])))\r\n for pe in potential_errors])\r\n return errors", "def validation_errors(self):\n return self._validation_errors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Chooses a BoTorch `MarginalLogLikelihood` class using the given `Model` class.
def choose_mll_class( model_class: Type[Model], state_dict: Optional[Dict[str, Tensor]] = None, refit: bool = True, ) -> Type[MarginalLogLikelihood]: # NOTE: We currently do not support `ModelListGP`. This code block will only # be relevant once we support `ModelListGP`. if (state_dict is None or refit) and issubclass(model_class, ModelListGP): return SumMarginalLogLikelihood return ExactMarginalLogLikelihood
[ "def get_classifier(model_type = 'mlp', **kwargs):\n if model_type == 'logistic':\n model = LogisticRegression(penalty='none', **kwargs)\n elif model_type == 'gradient_boosting':\n model = HistGradientBoostingClassifier(**kwargs)\n elif model_type == 'mlp':\n model = MLPClassifier(**kwargs)\n else:\n raise ValueError('Invalid model_type')\n return model", "def from_botorch(\n cls,\n model: Model,\n mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,\n ) -> Surrogate:\n surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)\n surrogate._model = model\n # Temporarily disallowing `update` for surrogates instantiated from\n # pre-made BoTorch `Model` instances to avoid reconstructing models\n # that were likely pre-constructed for a reason (e.g. if this setup\n # doesn't fully allow to constuct them).\n surrogate._constructed_manually = True\n return surrogate", "def set_model(self, likelihood_model_instance):\n pass", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def make_mlp_likelihood(model=None, model_config=None, wiener_params=None, **kwargs):\n\n def random(\n self,\n keep_negative_responses=True,\n add_model=False,\n add_model_parameters=False,\n add_outliers=False,\n keep_subj_idx=False,\n ):\n \"\"\"\n Generate random samples from a given model (the dataset matches the size of the respective observated dataset supplied as an attribute of self).\n \"\"\"\n\n # This can be simplified so that we pass parameters directly to the simulator ...\n theta = np.array(model_config[\"params_default\"], dtype=np.float32)\n keys_tmp = self.parents.value.keys()\n cnt = 0\n\n for param in model_config[\"params\"]:\n if param in keys_tmp:\n theta[cnt] = np.array(self.parents.value[param]).astype(np.float32)\n cnt += 1\n\n sim_out = simulator(theta=theta, model=model, n_samples=self.shape[0], max_t=20)\n\n # Add outliers:\n if add_outliers:\n if self.parents.value[\"p_outlier\"] > 0.0:\n sim_out = hddm_dataset_generators._add_outliers(\n sim_out=sim_out,\n p_outlier=self.parents.value[\"p_outlier\"],\n max_rt_outlier=1 / wiener_params[\"w_outlier\"],\n )\n\n sim_out_proc = hddm_preprocess(\n sim_out,\n keep_negative_responses=keep_negative_responses,\n keep_subj_idx=keep_subj_idx,\n add_model_parameters=add_model_parameters,\n )\n\n if add_model:\n sim_out_proc[\"model\"] = model\n\n return sim_out_proc\n\n def pdf(self, x):\n # Check if model supplied has only two choice options\n # If yes --> check if two-dimensional input (rt, response) or one-dimensional input (rt) --> processing depends on it\n # If not --> input x has to be two dimensional (rt, response) becasuse we can't deduce response from rt\n x = np.array(x, dtype=np.float32)\n\n if len(x.shape) == 1 or x.shape[1] == 1:\n rt = x\n response = rt / np.abs(rt)\n rt = np.abs(rt)\n elif x.shape[1] == 2:\n rt = x[:, 0]\n response = x[:, 1]\n\n params = np.array(\n [self.parents[param] for param in model_config[\"params\"]]\n ).astype(np.float32)\n\n return hddm.wfpt.wiener_like_nn_mlp_pdf(\n rt,\n response,\n params,\n p_outlier=self.parents.value[\"p_outlier\"],\n w_outlier=wiener_params[\"w_outlier\"],\n network=kwargs[\"network\"],\n )\n\n def cdf(self, x):\n # TODO: Implement the CDF method for neural networks\n return \"Not yet implemented\"\n\n 
def make_likelihood():\n likelihood_str = make_likelihood_str_mlp(\n config=model_config, wiener_params=wiener_params\n )\n exec(likelihood_str)\n my_fun = locals()[\"custom_likelihood\"]\n return my_fun\n\n # TODO: Allow for rt's of -999 in LAN likelihoods\n def make_likelihood_missing_data():\n return\n\n likelihood_ = make_likelihood()\n\n wfpt_nn = stochastic_from_dist(\"Wienernn_\" + model, partial(likelihood_, **kwargs))\n\n wfpt_nn.pdf = pdf\n wfpt_nn.cdf_vec = None # AF TODO: Implement this for neural nets (not a big deal actually but not yet sure where this is ever used finally)\n wfpt_nn.cdf = cdf\n wfpt_nn.random = random\n return wfpt_nn", "def compute_prior_predictive_log_likelihoods(simulated_y,\n orig_df,\n choice_col,\n model_obj):\n # Get the long-fitted probabilities\n long_probs = model_obj.long_fitted_probs\n log_long_probs = np.log(long_probs)\n\n # Populate the log-likelihood values\n log_likes = simulated_y.T.dot(log_long_probs).ravel()\n\n return log_likes", "def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)", "def MVN_log_likelihood(X, model):\n\n LL = np.sum(multivariate_normal.logpdf(X.T, model.means.T, model.cov, allow_singular=True))\n\n return LL", "def _joint_log_likelihood(self, X): \n assert hasattr(self, 'cond_probs'), 'Model not fit' \n \n def _vectorized_get(key, dict_):\n return np.vectorize(dict_.__getitem__)(key)\n\n class_probs_arr = np.atleast_2d([val for val in self.class_probs.values()])\n jll = np.repeat(np.log(class_probs_arr), self.n_samples, axis=0)\n\n for col in self.categorical_columns:\n log_probs = np.log(np.stack([_vectorized_get(self.X[col].values, self.cond_probs[i][col])\\\n for i in self.classes_], axis=1))\n\n # impute log cond prob to 0 in case of 0% cond prob in training set\n # TODO: this should probably be a toggle option in case user wants to throw an error on unseen values\n log_probs[log_probs == -np.inf] = 0\n\n jll += log_probs\n\n for col in self.numerical_columns:\n log_probs_by_class = []\n # willing to allow an O(n_features * n_classes) loop for now\n for label in self.classes_:\n mean = self.cond_probs[label][col]['mean']\n std = self.cond_probs[label][col]['std']\n\n # impute missing values to 0\n log_probs = np.log(gaussian_pdf(self.X[feat].values, mean, std))\n log_probs_0filled = np.nan_to_num(log_probs)\n\n log_probs_by_class.append(log_probs_0filled)\n\n log_probs_array = np.stack(log_probs_by_class, axis=1)\n jll += log_probs_array\n \n return jll", "def GSM_log_likelihood(X, model):\n k = len(model.mix)\n d = len(X)\n pdf = np.zeros((X.shape[1], k))\n for i, pi_i in enumerate(model.mix):\n denominator = ((2 * np.pi) ** d) * np.linalg.det(model.cov[i]) ** 0.5\n inv_e = np.linalg.inv(model.cov[i])\n for j, x in enumerate(X.T):\n likelihood = np.exp((-0.5) * (x.dot(inv_e).dot(x.T))) / denominator\n pdf[j, i] = np.log(likelihood * pi_i)\n if np.isinf(pdf[j, i]):\n pdf[j, i] = 0\n\n weighted_sum = logsumexp(pdf, axis=1)\n ll = np.sum(weighted_sum)\n return ll\n # TODO: YOUR CODE HERE", "def log_likelihood(model, X, Z):\n batch_size = X.shape[0]\n n_samples = Z.shape[1]\n\n log_p_xz = torch.Tensor(batch_size, n_samples).to(device)\n mu, logvar = model.encode(X.view(batch_size, 1, 28, 28))\n\n for i in range(n_samples):\n out = model.decode(Z[:, i, :])\n\n # reconstruction error using BCE\n log_p_xz[:, i] = -nn.functional.binary_cross_entropy(\n out.view(-1, 784), X.view(-1, 784), reduction=\"none\"\n ).sum(dim=1)\n # q(z|x) follows a multivariate normal distribution of mu, sigma^2\n log_q_zx = 
probability_density_function(Z, mu, logvar).to(device)\n\n # p(z) follows a standard multivariate normal distribution\n log_p_z = probability_density_function(\n Z, torch.zeros_like(mu), torch.zeros_like(logvar)\n ).to(device)\n\n log_p_x = log_p_xz + log_p_z - log_q_zx\n return np.log(n_samples) - log_p_x.logsumexp(dim=1) # Negative log likeyhood", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def marginalLikelihood(self,model,yKernel,f,included=True):\n\n mu,cov = self.marginalLikelihoodParams(model,yKernel,f,included)\n\n # data = np.zeros(self.n*num)\n # for i in range(num):\n # data[i*self.n:(i+1)*self.n] = model.y[:,ind[i]] / model.designMatrix[f,ind[i]]\n data = model.residual(f).ravel(1)\n\n rv = scipy.stats.multivariate_normal(mu,cov)\n return rv.logpdf(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Chooses a BoTorch `AcquisitionFunction` class.
def choose_botorch_acqf_class() -> Type[AcquisitionFunction]: # NOTE: In the future, this dispatch function could leverage any # of the attributes of `BoTorchModel` or kwargs passed to # `BoTorchModel.gen` to intelligently select acquisition function. return qNoisyExpectedImprovement
[ "def _function_class(self):\n return FriCASExpectFunction", "def run_acquisition_function(\n acquisition_function,\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n data_array,\n model_type,\n classification_model=None,\n number_of_cpus=0,\n):\n tmp_objective_limits = None\n configurations = concatenate_list_of_dictionaries(configurations)\n configurations = data_dictionary_to_tuple(\n configurations, param_space.get_input_parameters()\n )\n if acquisition_function == \"TS\":\n scalarized_values, tmp_objective_limits = thompson_sampling(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"UCB\":\n scalarized_values, tmp_objective_limits = ucb(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"EI\":\n scalarized_values, tmp_objective_limits = EI(\n configurations,\n data_array,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n else:\n print(\"Unrecognized acquisition function:\", acquisition_function)\n raise SystemExit\n\n scalarized_values = list(scalarized_values)\n\n # we want the local search to consider all points feasible, we already account for feasibility it in the scalarized value\n feasibility_indicators = [1] * len(scalarized_values)\n\n return scalarized_values, feasibility_indicators", "def register_acquisition(acq_class: Type[Acquisition]) -> None:\n class_name = acq_class.__name__\n CLASS_TO_REGISTRY[Acquisition].update({acq_class: class_name})\n CLASS_TO_REVERSE_REGISTRY[Acquisition].update({class_name: acq_class})", "def choose_class(self, *args, **kwargs):", "def _function_element_class(self):\n return FriCASFunctionElement", "def gatesetfn_factory(fn):\n class GSFTemp(GateSetFunction):\n \"\"\" GateSetFunction class created by gatesetfn_factory \"\"\"\n def __init__(self, gateset, *args, **kwargs):\n \"\"\" \n Creates a new GateSetFunction dependent on all of its GateSet\n argument's paramters\n \"\"\"\n self.args = args\n self.kwargs = kwargs \n GateSetFunction.__init__(self, gateset, [\"all\"])\n \n def evaluate(self, gateset):\n \"\"\" Evaluate this gate-set-function at `gateset`.\"\"\"\n return fn(gateset, *self.args, **self.kwargs)\n \n GSFTemp.__name__ = fn.__name__ + str(\"_class\")\n return GSFTemp", "def autoclass(self) -> Optional[pulumi.Input['BucketAutoclassArgs']]:\n return pulumi.get(self, \"autoclass\")", "def choice(func):\n # __choice_fn func_name used to identify function in Alternation.execute\n def __choice_fn(*args, **kwargs):\n return Choice(func, *args, **kwargs)\n return __choice_fn", "def _make_acquisition_from_model(self, model: IModel) -> Acquisition:\n if self.acquisition_class is EntropySearch: # pragma: no cover\n # Entropy Search requires one extra argument.\n # Here and in several other places, we ignore an apparent type error caused by the\n # use of @lru_cache, which causes variables that would otherwise just be float to have\n # the type lru_cache_wrapper(float). 
TODO figure out if lru_cache is really needed.\n return EntropySearch(model=model, space=self.space) # type: ignore # lru_cache_wrapper\n else:\n return self.acquisition_class(model=model) # type: ignore # may not have model argument", "def _get_class_creator(self, version):\n\n def _get_bcl2fastq2x_runner(self, config, binary):\n return BCL2Fastq2xRunner(config, binary)\n\n def _get_bcl2fastq1x_runner(self, config, binary):\n return BCL2Fastq1xRunner(config, binary)\n\n function_name = self.bcl2fastq_mappings[version][\"class_creation_function\"]\n function = locals()[function_name]\n return function", "def instantiate(self):\n details = json.loads(self.function_details)\n if 'function' not in details:\n raise ValueError(\"'function' not found in function_details\")\n if 'kwargs' not in details:\n raise ValueError(\"'kwargs' not found in function_details\")\n func_name = details['function']\n # limit to this module\n func = getattr(sys.modules[__name__], func_name)\n kwargs = {}\n for argset in details['kwargs']:\n kwargs[argset['name']] = argset['value']\n return func(**kwargs)", "def __init__(self, function):\n self.function = function", "def gatefn_factory(fn):\n class GSFTemp(GateSetFunction):\n \"\"\" GateSetFunction class created by gatefn_factory \"\"\"\n def __init__(self, gateset, gl, *args, **kwargs):\n \"\"\" Creates a new GateSetFunction dependent on a single gate\"\"\"\n self.gl = gl\n self.args = args\n self.kwargs = kwargs \n GateSetFunction.__init__(self, gateset, [\"gate:\"+gl])\n \n def evaluate(self, gateset):\n \"\"\" Evaluate this gate-set-function at `gateset`.\"\"\"\n return fn(gateset.gates[self.gl], gateset.basis,\n *self.args, **self.kwargs)\n \n GSFTemp.__name__ = fn.__name__ + str(\"_class\")\n return GSFTemp", "def getFunctionClass(functionID):\n d = { 1: Linear,\n 2: LinearDrag,\n 11: Gaussian,\n 12: GaussianDrag,\n 21: Lorentzian,\n 22: LorentzianDrag }\n return d[functionID]", "def start_acquisition(self):\n self.lib.StartAcquisition()", "def DefaultAcquirer():\n hook = GetCustomHook(\"DefaultAcquirer\")\n if hook is not None:\n return hook()\n return acm.FParty[_SETTINGS.Acquirer()]", "def __get_function(self):\n return random.choice(self.FUNCTIONS)", "def choose_func(*args):\n return _ida_kernwin.choose_func(*args)", "def _set_up_acq_opt_rand(self):\n def _random_max_wrap(*args):\n \"\"\" A wrapper so as to only return optimal point.\"\"\"\n _, opt_pt = random_maximise(*args)\n return opt_pt\n # Set this up in acq_optimise\n self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,\n max_evals)\n if self.get_acq_opt_max_evals is None:\n lead_const = 10 * min(5, self.domain_dim)**2\n self.get_acq_opt_max_evals = lambda t: np.clip(\n lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)\n # Acquisition function should be evaluated via multiple evaluations\n self.acq_query_type = 'multiple'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a `TrainingData` object based on sizes of Xs, Ys, and Yvars, and the type of model, for which the training data is intended.
def construct_training_data( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], model_class: Type[Model] ) -> TrainingData: if not isclass(model_class): # pragma: no cover raise ValueError( f"Expected `Type[Model]`, got: {model_class} " f"(type: {type(model_class)})." ) if len(Xs) == len(Ys) == 1: # Just one outcome, can use single model. return TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0]) elif issubclass(model_class, BatchedMultiOutputGPyTorchModel) and all( torch.equal(Xs[0], X) for X in Xs[1:] ): # All Xs are the same and model supports batched multioutput. return TrainingData( X=Xs[0], Y=torch.cat(Ys, dim=-1), Yvar=torch.cat(Yvars, dim=-1) ) elif model_class is ModelListGP: # pragma: no cover # TODO: This will be case for `ListSurrogate`. raise NotImplementedError("`ModelListGP` not yet supported.") raise ValueError(f"Unexpected training data format for {model_class}.")
[ "def create(model_config, batch_size, num_workers=0, augmentations=None):\n path = model_config.data_dir('anon')\n\n train_dataset = Anon(path, partition='train', download=True)\n test_dataset = Anon(path, partition='test', download=True)\n\n return TrainingData(\n train_dataset,\n test_dataset,\n num_workers=num_workers,\n batch_size=batch_size,\n augmentations=augmentations\n )", "def _build_dataset(self):\n\t\ttrain_x_raw, train_y_raw, train_vocab, self.train_sent = self._load_data(self.train_data_path)\n\t\tdev_x_raw, dev_y_raw, dev_vocab, self.dev_sent = self._load_data(self.dev_data_path)\n\t\tself.vocab = train_vocab.union(dev_vocab)\n\t\tself.vocab.add(\"<unk>\")\n\t\tself.vocab2idx, self.label2idx, self.idx2vocab, self.idx2label = self._build_vocab_idx()\n\t\tself.train_x, self.train_y, self.train_max_length = self._build_data_index(train_x_raw, train_y_raw)\n\t\tself.dev_x, self.dev_y, self.dev_max_length = self._build_data_index(dev_x_raw, dev_y_raw)", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def CreateAndTrainModel(train_X, train_Y, dev_X, dev_Y,\r\n time_steps, batch_size, stateful, epochs,\r\n model_type, units, dropout_rate,\r\n scale_type,\r\n wavelet_transform_iterations,\r\n encode_features,\r\n include_tweets_sentiment,\r\n output_directory, save_configuration):\r\n\r\n # always generate the same random numbers\r\n np.random.seed(3)\r\n\r\n number_of_features = features\r\n if encode_features == True:\r\n number_of_features = encoded_features\r\n\r\n model = create_sequence_model(model_type=model_type, number_of_features=number_of_features,\r\n time_steps=time_steps, batch_size=batch_size, stateful=stateful,\r\n units=units, dropout_rate=dropout_rate)\r\n\r\n Training(model, train_X, train_Y, dev_X, dev_Y,\r\n time_steps=time_steps, batch_size=batch_size, stateful=stateful, epochs=epochs,\r\n model_type=model_type, units=units, dropout_rate=dropout_rate,\r\n scale_type=scale_type,\r\n wavelet_transform_iterations=wavelet_transform_iterations,\r\n encode_features=encode_features,\r\n include_tweets_sentiment=include_tweets_sentiment,\r\n output_directory=output_directory)\r\n\r\n #Due to the custom RMSE function when running the function in a separate process\r\n #the deserialization of the model does not find the custom function. 
Solve this later.\r\n #For now just return the model in case we don't run it in a separate process\r\n if save_configuration == True:\r\n return model", "def _make_train_datasets(self):\n # Draw data from a random generator with a fixed seed to always get the\n # same data.\n rng = np.random.RandomState(42)\n train_x = rng.normal(0.0, self._noise_level, self._train_size)\n train_y = rng.normal(0.0, self._noise_level, self._train_size)\n train_x = np.float32(train_x)\n train_y = np.float32(train_y)\n train_data = self._make_dataset(train_x, train_y, shuffle=True)\n\n train_eval_data = train_data.take(self._train_size // self._batch_size)\n\n # Draw data from a random generator with a fixed seed to always get the\n # same data.\n rng = np.random.RandomState(44)\n valid_x = rng.normal(0.0, self._noise_level, self._train_size)\n valid_y = rng.normal(0.0, self._noise_level, self._train_size)\n valid_x = np.float32(valid_x)\n valid_y = np.float32(valid_y)\n valid_data = self._make_dataset(valid_x, valid_y, shuffle=False)\n\n return train_data, train_eval_data, valid_data", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def make_training_and_validation_data(\n self,\n BatchSize=100,\n TrainingSetInPercent=70,\n ValidationSetInPercent=30,\n NoBatches=False):\n\n if self._DataSet is None:\n raise Exception(\n \"No dataset initialized please call\\\n init_dataset first!\")\n if self._SymmFunSet is None:\n raise Exception(\n \"No symmetry function set initialized please call\\\n create_symmetry_functions or init_dataset first!\")\n\n\n if not NoBatches:\n # Get training data\n self.TrainingBatches = self.get_data(\n BatchSize, TrainingSetInPercent, NoBatches)\n # Get validation data\n self.ValidationBatches = self.get_data(\n BatchSize, ValidationSetInPercent, NoBatches)\n else:\n # Get training data\n temp = self.get_data(BatchSize, TrainingSetInPercent, NoBatches)\n self._TrainingInputs = temp[0][0]\n self._TrainingOutputs = temp[0][1]\n # Get validation data\n temp = self.get_data(BatchSize, ValidationSetInPercent, NoBatches)\n self._ValidationInputs = temp[0][0]\n self._ValidationOutputs = temp[0][0]", "def prepare_dataset(self, xFold_step, xFold_type):\n\n eval_samples_per_xfold = int(round((self.__train_size + self.__eval_size)/xFold_type))\n\n start_index = int(xFold_step*eval_samples_per_xfold)\n end_index = int(start_index + eval_samples_per_xfold)\n\n if end_index < len(self.__read_in_labels[-self.__test_size:]):\n end_index = len(self.__read_in_labels[-self.__test_size:])\n\n dataset = {\n \"x_train\": np.concatenate((self.__read_in_images[:start_index], self.__read_in_images[end_index:]), axis=0),\n \"y_train\": np.concatenate((self.__read_in_labels[:start_index], self.__read_in_labels[end_index:]), axis=0),\n\n \"x_eval\": self.__read_in_images[start_index:end_index],\n \"y_eval\": self.__read_in_labels[start_index:end_index],\n\n \"x_test\": self.__read_in_images[-self.__test_size:],\n \"y_test\": self.__read_in_labels[-self.__test_size:],\n }\n\n return dataset", "def _create_train_dataset(self, data_path):\n\n pass", "def _create_model_get_train_X_y(self, X_train, y_train):\n if X_train is not None:\n data_X = X_train.copy()\n else:\n if self.X_train is None:\n data_X = None\n else:\n data_X = self.X_train\n data_y = self.y_train if y_train is None else y_train.copy()\n return 
data_X, data_y", "def build_training_data(raw_training_data, tokenizer_hu):\n raw_train, raw_val = raw_training_data\n\n def encode(lang1):\n lang1 = [tokenizer_hu.vocab_size] + tokenizer_hu.encode(lang1.numpy()) + [tokenizer_hu.vocab_size+1]\n return lang1, [1, 2, 3]\n def tf_encode(hu):\n result_hu, _ = tf.py_function(encode, [hu], [tf.int64, tf.int64])\n result_hu.set_shape([None])\n\n return result_hu\n def cut_max_length(x, max_length=MAX_LENGTH):\n x = tf.pad(x, [[0, max_length]])\n x = tf.slice(x, [0], [max_length - 2])\n x = tf.pad(x, [[0, 2]])\n return x\n\n ds_train = raw_train.map(tf_encode)\n ds_train = ds_train.map(cut_max_length)\n ds_train = ds_train.cache()\n BUFFER_SIZE = 5000\n ds_train = ds_train.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE)\n\n ds_val = raw_val.map(tf_encode)\n ds_val = ds_val.map(cut_max_length)\n ds_val = ds_val.padded_batch(BATCH_SIZE)\n return (ds_train, ds_val)", "def construct(data_dir, fname, X=None, normalize=False, _type='sparse'):\n if _type == 'sparse':\n return SparseFeatures(data_dir, fname, X, normalize)\n elif _type == 'dense':\n return DenseFeatures(data_dir, fname, X, normalize)\n elif _type == 'sequential':\n return SequentialFeatures(data_dir, fname, X)\n else:\n raise NotImplementedError(\"Unknown feature type\")", "def training_data(kind, depth = 5):\n\n if kind == 'unigram':\n return UnigramTrainingData.load(UNIGRAM_DIR + str(depth))\n\n if kind == 'rnn':\n return RNNTrainingData.load(RNN_DIR + str(depth))", "def _make_data():\n models = [ImageModel() for i in range(N_MODELS)]\n\n input_values = {\n 'meta.exposure.start_time': START_TIMES,\n 'meta.exposure.exposure_time': EXP_TIMES,\n 'meta.exposure.end_time': END_TIMES,\n 'meta.filename': FILENAMES,\n 'meta.instrument.coronagraph': CORONAGRAPHS,\n 'meta.instrument.name': INSTRUMENT_NAMES,\n 'meta.date': DATETIMES,\n 'meta.observation.date': DATES,\n 'meta.observation.date_beg': DATETIMES,\n }\n output_values = {\n 'meta.exposure.start_time': START_TIMES[0],\n 'meta.exposure.exposure_time': np.sum(EXP_TIMES),\n 'meta.exposure.end_time': END_TIMES[-1],\n 'meta.filename': FILENAMES[0],\n 'meta.instrument.coronagraph': CORONAGRAPHS[0],\n 'meta.instrument.name': INSTRUMENT_NAMES[0],\n 'meta.date': DATETIMES[0],\n 'meta.observation.date': DATES[1],\n 'meta.observation.date_beg': DATETIMES[1]\n }\n\n for i, model in enumerate(models):\n for attr in input_values:\n model[attr] = input_values[attr][i]\n\n return models, input_values, output_values", "def create_datasets(self):\n train = self.x[:self.trainsize+self.look_back]\n test = self.x[self.trainsize+1:]\n\n trainx, trainy = self.create_lookback_dataset(train)\n testx, testy = self.create_lookback_dataset(test)\n\n # segment the data based on traning style\n if self.train_style == 'sequential':\n trainx = self.chunk_data(trainx)\n trainy = self.chunk_data(trainy)\n elif self.train_style == 'random':\n print(\"'random' training not yet implemented.\")\n exit()\n elif self.train_style == 'overlap':\n trainx = [trainx[(i*self.shift):(self.base_size + (i*self.shift))]\n for i in range(self.num_segments)]\n trainy = [trainy[(i*self.shift):(self.base_size + (i*self.shift))]\n for i in range(self.num_segments)]\n else:\n print(\"Invalid training style for ensemble.\")\n exit()\n\n self.trainx, self.trainy = trainx, trainy\n self.testx, self.testy = testx, testy\n return", "def initialize_dataloaders(\n self, X: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, np.array]\n ):\n training_design_matrix, training_targets_array, 
validation_design_matrix, validation_targets_array = self.generate_training_validation_split(\n X, y\n )\n training_dataloader_kwargs = {\n \"design_matrix\": training_design_matrix,\n \"targets_array\": training_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": self.shuffle_training_examples,\n }\n validation_dataloader_kwargs = {\n \"design_matrix\": validation_design_matrix,\n \"targets_array\": validation_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": False,\n }\n self.training_dataloader = self.generate_dataloader(**training_dataloader_kwargs)\n self.validation_dataloader = self.generate_dataloader(**validation_dataloader_kwargs)", "def build_training_data(data_dir, genres, width, height):\n training_data = []\n\n for genre in genres:\n current_genre_path = os.path.join(data_dir, genre)\n\n for root, dirs, files in os.walk(current_genre_path):\n for file in files:\n song_genre = file.split('.')[0]\n class_num = genres.index(song_genre)\n\n song_path = os.path.join(root, file)\n audio, sr = lr.load(song_path, sr=22050)\n\n spectrogram = lr.feature.melspectrogram(y=audio, sr=sr)\n img = cv2.resize(spectrogram, (width, height))\n training_data.append([img, song_genre])\n\n X = [img for img, _ in training_data]\n y = [label for _, label in training_data]\n\n return X, y", "def test_create_training_dataset(self):\n pass", "def define_training_data(self, train_sources, train_labels=None):\n logging.info(\"Defining training data for NNetModel...\")\n self.train_cols = []\n if train_labels is None:\n for source in train_sources:\n self.train_cols += self._read(source)\n else:\n for source, label in zip(train_sources, train_labels):\n self.train_cols += self._read(source, label)\n\n logging.info(\"NNetModel: Training data contains {} columns from {} sources\".format(len(self.train_cols), len(train_sources)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates that Xs, Ys, Yvars, and metric names all have equal lengths.
def validate_data_format( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str] ) -> None: if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1: raise ValueError( # pragma: no cover "Lengths of Xs, Ys, Yvars, and metric_names must match. Your " f"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and " f"{len(metric_names)}, respectively." )
[ "def _validateDim(self, obj1, obj2, errors, label1='Input 1', label2='Input 2'):\n if obj1 is not None and obj2 is not None:\n d1 = obj1.getXDim()\n d2 = obj2.getXDim()\n\n if d1 is None:\n errors.append(\"Can not get dimensions from %s.\" % label1)\n elif d2 is None:\n errors.append(\"Can not get dimensions from %s.\" % label2)\n elif d1 != d2:\n msg = '%s and %s have not the same dimensions, \\n' % (label1, label2)\n msg += 'which are %d and %d, respectively' % (d1, d2)\n errors.append(msg)", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def _validate_length_features_and_labels(\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ):\n\n # Getting the length of label names, feature_names and feature_stats\n len_of_label_names = (\n 0\n if not model_endpoint.spec.label_names\n else len(model_endpoint.spec.label_names)\n )\n len_of_feature_names = len(model_endpoint.spec.feature_names)\n len_of_feature_stats = len(model_endpoint.status.feature_stats)\n\n if len_of_feature_stats != len_of_feature_names + len_of_label_names:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"The length of model endpoint feature_stats is not equal to the \"\n f\"length of model endpoint feature names and labels \"\n f\"feature_stats({len_of_feature_stats}), \"\n f\"feature_names({len_of_feature_names}),\"\n f\"label_names({len_of_label_names}\"\n )", "def validate_equal_length(*args):\n length = len(args[0])\n if any(len(lst) != length for lst in args):\n raise exceptions.PlotlyError(\"Oops! 
Your data lists or ndarrays \"\n \"should be the same length.\")", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def check_equal_length(para_name1, value1, para_name2, value2):\n if len(value1) != len(value2):\n msg = 'The dimension of {0} must equal to the {1}, but got {0} is {2}, {1} is {3}'\\\n .format(para_name1, para_name2, len(value1), len(value2))\n LOGGER.error(TAG, msg)\n raise ValueError(msg)\n return value1, value2", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def test_validate_data_label_shape_requirement(self):\n with self.assertRaises(ValueError):\n self.y = np.vstack([self.y, self.y])\n self.cl._validate_data(self.r, self.y)", "def _check_input(self, X, Y):\n if not isinstance(X, np.ndarray):\n raise ValueError(\"Type of X must be numpy.ndarray. \" +\n \"Received type {}.\".format(type(X)))\n if not isinstance(Y, np.ndarray):\n raise ValueError(\"Type of Y must be numpy.ndarray. \" +\n \"Received type {}.\".format(type(Y)))\n if len(X.shape) != 2 or X.shape[1] != self.input_dim:\n raise ValueError(\"Shape of X must be (n_obs, {}). \".format(self.input_dim) +\n \"Received shape {}.\".format(X.shape))\n if len(Y.shape) != 2 or Y.shape[1] != 1:\n raise ValueError(\"Shape of Y must be (n_obs, {}). 
\".format(self.input_dim) +\n \"Received shape {}.\".format(X.shape))\n if X.shape[0] != Y.shape[0]:\n raise ValueError(\"X and Y must contain equal number of observations \" +\n \"(X.shape[0]={}, Y.shape[0]={}).\".format(X.shape[0], Y.shape[0]))\n for x in X:\n if not self._within_bounds(x):\n raise ValueError(\"Location {} was not within model bounds.\".format(x))\n return X, Y", "def _check_lengths(v1, v2):\n\n if len(v1.coords) != len(v2.coords):\n raise Exception(\"Vectors are of different sizes\")", "def validate_dimensions(self):\n if self.X.shape[1] < self.embedding_dim:\n raise ValueError(\n \"Embedding dimension must be less or equal to the ambient \"\n \"dimension of input data\"\n )\n if self.geod_n_neighbors >= self.X.shape[0]:\n raise ValueError(\n \"Geodesic neighborhood size must be less than the \"\n \"total number of samples\"\n )\n if ((self.n_neighbors is not None) and\n (self.n_neighbors >= self.X.shape[0])):\n raise ValueError(\n \"kNN neighborhood size must be less than the \"\n \"total number of samples\"\n )\n if ((self.n_neighbors is not None) and\n (self.geod_n_neighbors < self.n_neighbors)):\n raise ValueError(\n \"Geodesic neighborhood size should be larger or equal to the \"\n \"n_neighbors\"\n )\n if self.geod_n_neighbors < self.embedding_dim:\n raise ValueError(\n \"Geodesic neighborhood size must be larger or equal to the \"\n \"embedding dimension\"\n )", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def _check_input(data):\n T = len(data)\n data = np.array(data)\n dim = data[0].size if not np.isscalar(data[0]) else 1\n return data, T, dim", "def verify_types_for_layout(self):\n errors = []\n if not isinstance(self.name, str):\n errors.append(f'Expected self.name=\"{self.name}\" to be str')\n if not isinstance(self.dims, tuple):\n errors.append(f'Expected self.dims=\"{self.dims}\" to be tuple')\n if not isinstance(self.dims_dict, OrderedDict):\n errors.append(f'Expected self.dims_dict=\"{self.dims_dict}\" to be OrderedDict')\n if errors:\n formatted_errors = '\\n' + '\\n'.join(errors)\n raise RuntimeError(f'Found errors in data members:{formatted_errors}')", "def _verify_names(sampler, var_names, arg_names):\n num_vars = sampler.chain.shape[-1]\n # Get emcee version 2 sampler args, else get emcee version 3\n num_args = len(sampler.args) if hasattr(sampler, \"args\") else len(sampler.log_prob_fn.args)\n\n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n if arg_names is None:\n arg_names = [\"arg_{}\".format(idx) for idx in range(num_args)]\n\n if len(var_names) != num_vars:\n raise ValueError(\n \"The sampler has {} variables, but only {} var_names were provided!\".format(\n num_vars, len(var_names)\n )\n )\n\n if len(arg_names) != num_args:\n raise ValueError(\n \"The sampler has {} args, but only {} arg_names were provided!\".format(\n num_args, len(arg_names)\n )\n )\n return var_names, arg_names", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def check_x_and_y_axis_len(self, x_axis, y_axis):\n if x_axis ==0: \n raise ValueError(\"Error! SOM X-Axis is 0!\")\n if y_axis==0:\n raise ValueError(\"Error! SOM Y-Axis is 0!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract acquisition and optimizer options from `model_gen_options`.
def construct_acquisition_and_optimizer_options( acqf_options: TConfig, model_gen_options: Optional[TConfig] = None ) -> Tuple[TConfig, TConfig]: acq_options = acqf_options.copy() opt_options = {} if model_gen_options: acq_options.update( checked_cast(dict, model_gen_options.get(Keys.ACQF_KWARGS, {})) ) # TODO: Add this if all acq. functions accept the `subset_model` # kwarg or opt for kwarg filtering. # acq_options[SUBSET_MODEL] = model_gen_options.get(SUBSET_MODEL) opt_options = checked_cast( dict, model_gen_options.get(Keys.OPTIMIZER_KWARGS, {}) ).copy() return acq_options, opt_options
[ "def get_optimizer_experimental_options():\n return context.context().get_optimizer_experimental_options()", "def get_image_generator_from_options(options):\n #if options.dataset == 'robonet':\n if options.feature_extractor==\"spxl_segmenter\":\n params = {'reduce_features': True, 'small_reduction':True}\n elif options.feature_extractor==\"instance_segmenter\":\n params = {'reduce_features': True, 'small_reduction':False}\n elif options.feature_extractor==\"precropped\":\n params = {'reduce_features': False}\n else:\n raise Exception('Do not recognize given feature extractor!!')\n\n params['final_synth'] = options.final_synth\n return AdditionGenerator(**params)", "def _get_specific_options(cls, ops, params):\n\n # TODO: Check GPU option!\n ops[\"GPU\"] = params[\"useGPU\"] # whether to run this code on an Nvidia GPU (much faster, mexGPUall first)\n ops[\"parfor\"] = 0.0 # whether to use parfor to accelerate some parts of the algorithm\n ops[\"verbose\"] = 1.0 # whether to print command line progress\n ops[\"showfigures\"] = 0.0 # whether to plot figures during optimization\n\n ops[\"Nfilt\"] = params[\n \"Nfilt\"\n ] # number of clusters to use (2-4 times more than Nchan, should be a multiple of 32)\n ops[\"nNeighPC\"] = min(\n 12.0, ops[\"Nchan\"]\n ) # visualization only (Phy): number of channnels to mask the PCs, leave empty to skip (12)\n ops[\"nNeigh\"] = 16.0 # visualization only (Phy): number of neighboring templates to retain projections of (16)\n\n # options for channel whitening\n ops[\n \"whitening\"\n ] = \"full\" # type of whitening (default 'full', for 'noSpikes' set options for spike detection below)\n ops[\"nSkipCov\"] = 1.0 # compute whitening matrix from every N-th batch (1)\n ops[\n \"whiteningRange\"\n ] = 32.0 # how many channels to whiten together (Inf for whole probe whitening, should be fine if Nchan<=32)\n\n # ops['criterionNoiseChannels'] = 0.2 # fraction of \"noise\" templates allowed to span all channel groups (see createChannelMapFile for more info).\n\n # other options for controlling the model and optimization\n ops[\"Nrank\"] = 3.0 # matrix rank of spike template model (3)\n ops[\"nfullpasses\"] = 6.0 # number of complete passes through data during optimization (6)\n ops[\"maxFR\"] = 20000 # maximum number of spikes to extract per batch (20000)\n ops[\"fshigh\"] = params[\"freq_min\"] # frequency for high pass filtering\n ops[\"fslow\"] = params[\"freq_max\"] # frequency for low pass filtering (optional)\n ops[\"ntbuff\"] = params[\"ntbuff\"] # samples of symmetrical buffer for whitening and spike detection\n ops[\"scaleproc\"] = 200.0 # int16 scaling of whitened data\n ops[\"NT\"] = params[\"NT\"] # 32*1024+ ops.ntbuff;\n # this is the batch size (try decreasing if out of memory)\n # for GPU should be multiple of 32 + ntbuff\n\n # the following options can improve/deteriorate results.\n # when multiple values are provided for an option, the first two are beginning and ending anneal values,\n # the third is the value used in the final pass.\n ops[\"Th\"] = [4.0, 10.0, 10.0] # threshold for detecting spikes on template-filtered data ([6 12 12])\n ops[\"lam\"] = [5.0, 5.0, 5.0] # large means amplitudes are forced around the mean ([10 30 30])\n ops[\"nannealpasses\"] = 4.0 # should be less than nfullpasses (4)\n ops[\"momentum\"] = [1 / 20, 1 / 400] # start with high momentum and anneal (1./[20 1000])\n ops[\"shuffle_clusters\"] = 1.0 # allow merges and splits during optimization (1)\n ops[\"mergeT\"] = 0.1 # upper threshold for merging (.1)\n 
ops[\"splitT\"] = 0.1 # lower threshold for splitting (.1)\n\n ops[\"initialize\"] = \"fromData\" # 'fromData' or 'no'\n ops[\"spkTh\"] = -params[\"detect_threshold\"] # spike threshold in standard deviations (-6)\n ops[\"loc_range\"] = [3.0, 1.0] # ranges to detect peaks; plus/minus in time and channel ([3 1])\n ops[\"long_range\"] = [30.0, 6.0] # ranges to detect isolated peaks ([30 6])\n ops[\"maskMaxChannels\"] = 5.0 # how many channels to mask up/down ([5])\n ops[\"crit\"] = 0.65 # upper criterion for discarding spike repeates (0.65)\n ops[\"nFiltMax\"] = 10000.0 # maximum \"unique\" spikes to consider (10000)\n\n # options for posthoc merges (under construction)\n ops[\"fracse\"] = 0.1 # binning step along discriminant axis for posthoc merges (in units of sd)\n ops[\"epu\"] = np.Inf\n\n ops[\"ForceMaxRAMforDat\"] = 20e9 # maximum RAM the algorithm will try to use; on Windows it will autodetect.\n\n ## option for wavelength\n ops[\"nt0\"] = params[\n \"wave_length\"\n ] # size of the waveform extracted around each detected peak. Be sure to make it odd to make alignment easier.\n return ops", "def test_optimizer_kwargs():\n trainer = DirectClassifier(\n models=\"LR\",\n n_calls=2,\n n_initial_points=2,\n bo_params={\"acq_func\": \"EI\"},\n random_state=1,\n )\n trainer.run(bin_train, bin_test)\n assert trainer._bo_kwargs.get(\"acq_func\") == \"EI\"", "def parse_options(parser):\n TensorflowModel.parse_options(parser)\n parser.add_argument('--input-dim', type=int, default=160)\n parser.add_argument('--input-len', type=int, default=7501)\n parser.add_argument('--output-len', type=int, default=7501)\n parser.add_argument('--conv-layer-num', type=int, default=2)\n parser.add_argument('--conv-kernel-num', type=int, default=1)\n parser.add_argument('--conv-kernel-len', type=int, default=512)", "def get_optimizer():\n ##################\n # YOUR CODE HERE #\n ##################", "def algo_options(alg):\n origin, algo_name = alg.split(\"_\", 1)\n if origin == \"pygmo\":\n if algo_name in [\"ihs\"]:\n algo_options = {\"popsize\": 1, \"gen\": 1000, \"seed\": 123}\n elif algo_name in [\"sea\"]:\n algo_options = {\"popsize\": 5, \"gen\": 7000, \"seed\": 1234}\n else:\n algo_options = {\"popsize\": 30, \"gen\": 150, \"seed\": 12345}\n else:\n algo_options = {}\n\n return algo_options", "def optimizer_kwargs(parsed_args):\n return {\n 'optim': parsed_args.optim,\n 'lr': parsed_args.lr,\n 'weight_decay': parsed_args.weight_decay,\n 'momentum': parsed_args.momentum,\n 'sgd_dampening': parsed_args.sgd_dampening,\n 'sgd_nesterov': parsed_args.sgd_nesterov,\n 'rmsprop_alpha': parsed_args.rmsprop_alpha,\n 'adam_beta1': parsed_args.adam_beta1,\n 'adam_beta2': parsed_args.adam_beta2\n }", "def get_options(self) -> Dict[str, Any]:", "def _get_optimizer(self):\n raise NotImplementedError", "def get_optimizer(model, args):\n if args.gpu_ids:\n train_params = model.module.get_train_parameters(args.lr)\n else:\n train_params = model.get_train_parameters(args.lr)\n\n if args.optim == SGD:\n optimizer = optim.SGD(train_params, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)\n elif args.optim == ADAM:\n optimizer = optim.Adam(train_params, lr=args.lr, weight_decay=args.weight_decay)\n elif args.optim == AMSGRAD:\n optimizer = optim.Adam(train_params, lr=args.lr, weight_decay=args.weight_decay, amsgrad=True)\n else:\n raise NotImplementedError\n\n return optimizer", "def gather_options(self):\n if not self.initialized: # check if it has been initialized\n parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic options\n opt, _ = parser.parse_known_args()\n\n # modify model-related parser options\n model_name = opt.model\n model_option_setter = models.get_option_setter(model_name)\n parser = model_option_setter(parser, self.isTrain)\n opt, _ = parser.parse_known_args() # parse again with new defaults\n\n # modify dataset-related parser options\n dataset_name = opt.dataset_mode\n dataset_option_setter = data.get_option_setter(dataset_name)\n parser = dataset_option_setter(parser, self.isTrain)\n\n # save and return the parser\n self.parser = parser\n return parser.parse_args()", "def get_simulation_options(self):\n return self.opts", "def required_optimizer_arguments(self):\n\t\treturn {\n\t\t\t\"momentum\": [\"momentum\"]\n\t\t}", "def optimizer_kwargs(cfg):\n kwargs = dict(\n optimizer_name=cfg.opt,\n learning_rate=cfg.lr,\n weight_decay=cfg.weight_decay,\n momentum=cfg.momentum)\n if getattr(cfg, 'opt_eps', None) is not None:\n kwargs['eps'] = cfg.opt_eps\n if getattr(cfg, 'opt_betas', None) is not None:\n kwargs['betas'] = cfg.opt_betas\n if getattr(cfg, 'opt_args', None) is not None:\n kwargs.update(cfg.opt_args)\n return kwargs", "def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs", "def get_optimizer(self, model_name):\n return self.optimizers[model_name]", "def ExtraOptions():", "def extra_options():\n extra_vars = {\n 'shared_libs': [None, \"Deprecated. Use build_shared_libs\", CUSTOM],\n 'openmp': [True, \"Enable OpenMP support\", CUSTOM],\n 'all_exts': [True, \"Enable all Trilinos packages\", CUSTOM],\n 'skip_exts': [[], \"List of Trilinos packages to skip\", CUSTOM],\n 'verbose': [False, \"Configure for verbose output\", CUSTOM],\n }\n return CMakeMake.extra_options(extra_vars)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the hash digest as a bytes object. This is the big-endian representation of the value returned by ``intdigest()`` and is equivalent to the output of the ``XXH64_canonicalFromHash()`` function in the `reference implementation`_ applied to the value returned by ``intdigest()``.
def digest(self): # For discussion of big-endian vs little-endian for the hash # digest of XXHASH algorithms, see # https://github.com/Cyan4973/xxHash/issues/45 return struct.pack(">Q", self.intdigest())
[ "def hexdigest(self):\n\n d = map(None, self.digest())\n d = map(ord, d)\n d = map(lambda x:\"%02x\" % x, d)\n d = ''.join(d)\n\n return d", "def digest(self):\n return self._hash", "def bytes_from_string_digest(cls, hash_string: str) -> bytes:\n pass", "def digest(self):\n return struct.pack('>L', self._crc)", "def hexdigest(self):\n return self._digest.hexdigest()", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hexdigest(self):\n return '%08x%08x%08x%08x%08x' % self._process_digest()", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def getHash(self):\n return sha.sha(self.serial).digest()", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def hash(self):\n return hash256(self.serialize())[::-1]", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def bin_hash(self):\n return self._bin_hash", "def getHash(self):\n f = StringIO()\n self.saveBinary(f)\n m = md5.new()\n m.update(f.getvalue())\n return m.hexdigest()", "def digest(self, *args):\n return self._hash.digest(*args)", "def binsha(self):\r\n return self[1]", "def incore_digest(self):\n return hasher(self.content).hexdigest()", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def compute_hash(self):\n block_string = json.dumps(self.storable, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the hash digest as a string of hexadecimal digits. This is the value returned by ``digest()`` expressed as a printable hex string for easy display.
def hexdigest(self): # bytes.hex() is simpler, but not available For Python <= 3.4 return "".join("{0:0>2x}".format(b) for b in self.digest())
[ "def hexdigest(self):\n\n d = map(None, self.digest())\n d = map(ord, d)\n d = map(lambda x:\"%02x\" % x, d)\n d = ''.join(d)\n\n return d", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hexdigest(self):\n return '%08x%08x%08x%08x%08x' % self._process_digest()", "def hexdigest(self):\n return self._digest.hexdigest()", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def digest(self):\n # For discussion of big-endian vs little-endian for the hash\n # digest of XXHASH algorithms, see\n # https://github.com/Cyan4973/xxHash/issues/45\n return struct.pack(\">Q\", self.intdigest())", "def hexdigest(self, *args):\n return self._hash.hexdigest(*args)", "def string_digest(self) -> str:\n pass", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def hexsha(self):\r\n return b2a_hex(self[1])", "def digest(self):\n return self._hash", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def get_file_hash_hex(self, file_name):\n list_hash = self.calculate_file_hash(file_name)\n return ''.join([\"%02X \" % x for x in list_hash])", "def hex(self) -> str:\n return hashlib.md5(str(self.path).encode()).hexdigest()", "def createHashcodeString(digest):\n map_num2hex = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n hashcodelist = [None] * len(digest)\n \n for i1 in range(0, len(digest)):\n digest_i = digest[i1] # Extracts the number from the digest.\n hashcodelist[i1] = map_num2hex[digest_i] # Turns the number to a hex value and assigns it to the hashcodelist.\n \n hashcodestring = \"\"\n \n for i1 in range(0, len(hashcodelist)):\n hashcodestring = hashcodestring + hashcodelist[i1] # Appends the characters to form a string.\n \n return hashcodestring", "def hash_string(self):\n return self._hash_string", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def getHash(self):\n return sha.sha(self.serial).digest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the specified instance matches the service's model.
def _isinstance(self, instance, raise_error=True): if isinstance(instance, self.__model__): return True elif raise_error: raise ValueError('{} is not of type {}.'.format( instance, self.__model__, )) else: return False
[ "def is_instance_of_model(obj, model):\r\n return (isinstance(obj, model)\r\n or type(obj) is model\r\n or model in obj.__class__.__bases__)", "def checkModel(self, model):\n # TODO", "def instance_exists(self, instance):\n pass", "def is_model_instance(self):\n return self.get_aggregation_class_name() == 'ModelInstanceLogicalFile'", "def is_exact_match(self, instance):\n for attr in set(instance).union(set(self.attrs())):\n if attr[0] == '_':\n continue\n if attr in instance and attr not in self.av_counts:\n return False\n if attr in self.av_counts and attr not in instance:\n return False\n\n return True", "def can_use_model(self, model):\n pass", "def model_equal(self, anki_note: Note) -> bool:\n model = anki_note.model()\n assert model is not None, \"Anki note passed does not have a model!\"\n assert self.model is not None, \\\n f\"Class {self.__class__} does not specify a 'model' attribute.\"\n return model['name'] == self.model.name", "def _isinstance(self, model, raise_error=True):\n rv = isinstance(model, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (model, self.__model__))\n return rv", "def test_instance_BaseModel(self):\n self.assertTrue(isinstance(self.my_object, BaseModel))", "def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))", "def find_model_instance_else_add(self, model_obj):\n\n if not getattr(model_obj, \"model_instance_uuid\", None):\n # check that the model is registered with the model registry.\n if not hasattr(model_obj, \"model_uuid\") and not hasattr(\n model_obj, \"model_alias\"\n ):\n raise AttributeError(\n \"Model object does not have a 'model_uuid'/'model_alias' attribute. 
\"\n \"Please register it with the Validation Framework and add the 'model_uuid'/'model_alias' to the model object.\"\n )\n if not hasattr(model_obj, \"model_version\"):\n raise AttributeError(\n \"Model object does not have a 'model_version' attribute\"\n )\n\n model_instance = self.get_model_instance(\n model_id=getattr(model_obj, \"model_uuid\", None),\n alias=getattr(model_obj, \"model_alias\", None),\n version=model_obj.model_version,\n )\n if not model_instance: # check if instance doesn't exist\n # if yes, then create a new instance\n model_instance = self.add_model_instance(\n model_id=getattr(model_obj, \"model_uuid\", None),\n alias=getattr(model_obj, \"model_alias\", None),\n source=getattr(model_obj, \"remote_url\", \"\"),\n version=model_obj.model_version,\n parameters=getattr(model_obj, \"parameters\", \"\"),\n )\n else:\n model_instance = self.get_model_instance(\n instance_id=model_obj.model_instance_uuid\n )\n return model_instance", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def conforms(self, instance, format):\n\n try:\n self.check(instance, format)\n except FormatError:\n return False\n else:\n return True", "def is_instance_okay(self):\n\n return self.instance_okay", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)", "def conforms(self, instance, format):\r\n\r\n try:\r\n self.check(instance, format)\r\n except FormatError:\r\n return False\r\n else:\r\n return True", "def match(self, cls):\n return isinstance(self, cls)", "def isInstance(self):\r\n return self._wrap(type(self.obj) is InstanceType)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the provided integer 'n' into a valid insertion point in the string 's', i.e. one of the current index positions or the position at the end
def gen_index_via_mod(s, n): if len(s) == 0: return 0 return n % (len(s) + 1)
[ "def insert_newlines(s, n):\n \n i = n\n while i<len(s):\n s.insert(i, '\\n')\n i += n+2", "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def insert(t, n):\n return t[:-1] + (n, t[-1])", "def esrever2(n, s):\n if n == 0:\n return s\n else:\n result = esrever2(n // 10, s * 10 + n % 10)\n return result", "def insert_number(n, m, i, j):\n n = str(n)\n m = str(m)\n\n n_beginning = n[:-(j + 1)]\n n_end = n[-i:]\n\n new_number = n_beginning + m + n_end\n\n return int(new_number)", "def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t", "def find_nth(string, sub, n):\n\n # set dummy variable\n if sub[0] == 'i':\n replacer = 'j'\n else:\n replacer = 'i'\n\n # loop through n, find substring index, modify string, decrement n \n while n > 0:\n sub_idx = string.find(sub)\n string = string[:sub_idx] + replacer + string[sub_idx + 1 :]\n n -= 1\n\n return sub_idx", "def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1", "def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s", "def generateTheString(n):\n \n if n % 2 == 0:\n return \"a\" * (n - 1) + \"b\"\n \n else:\n return \"a\" * n", "def _get_nth(self, n):\n return self.start + ((n - 1) * self.delta)", "def layout_string(n):\n return \"{0}x{1}\".format((n / 2) + (n % 2), 1 if n == 1 else 2)", "def increment(s):\n m = lastNum.search(s)\n if m:\n next = str(int(m.group(1))+1)\n start, end = m.span(1)\n s = s[:max(end-len(next), start)] + next + s[end:]\n return s", "def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str", "def every_nth(s, n, o = 0):\n\tl = len(s)\n\treturn ''.join(s[o : l : n])", "def replace_digit(n, d, pos):\n n = str(n)\n\n for p in pos:\n if p > len(n):\n raise ValueError(\"Position to large for number\")\n n = n[:len(n)-p] + str(d) + n[len(n)-p+1:]\n\n return int(n)", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.word\n string += \"\\n\" + self.children[0].get_string(n + 1)\n string += \"\\n\" + self.children[1].get_string(n + 1)\n return string", "def __add__(self, n: int) -> 'SourcePos':\n if n < 0:\n return self - abs(n)\n original_n = n\n line = self.file_content.lines[self.line_i]\n remaining_chars = len(line.s(self.form)) - self.col_i\n if n <= remaining_chars:\n return SourcePos(\n self.file_content,\n self.line_i,\n self.col_i + n,\n self.form\n )\n else:\n n -= remaining_chars\n for line in self.file_content.lines[self.line_i + 1:]:\n line_chars = len(line.s(self.form))\n if n <= line_chars:\n return SourcePos(self.file_content, line.index, n, self.form)\n n -= line_chars\n else:\n raise ValueError(\n f'Cannot add {original_n} to {self}. '\n f'{original_n} is too large.')", "def _n_spaces(n):\n\n return int(n) * ' '" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all announcements on the server
def get(self): announcements = Announcement.query.all() announcements = announcements_schema.dump(announcements) if not announcements: return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served return {'status': 'success', 'announcements': announcements}, 200
[ "def do_list(client):\n response = client.cmd('announce/list').json()\n lines = []\n for i, announcement in enumerate(response):\n lines.append(\"[{}] {}\".format(i, announcement['text']))\n print '\\n'.join(lines)", "def getServiceAnnouncements():\r\n soapheader['SOAPAction'] = '/getServiceAnnouncements'\r\n sendMessage(getAnn)\r\n printResponse()", "async def getAnnouncements(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getAnnouncements()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAnnouncements\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getAnnouncements\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/announcements\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def announce_urls(self) -> Optional[List[List[str]]]:\n urls = self._struct.get('announce-list')\n\n if not urls:\n urls = self._struct.get('announce')\n if not urls:\n return []\n urls = [[urls]]\n\n return urls", "def list(self, irc, msg, args, channel):\n announce = conf.supybot.plugins.RSS.announce\n feeds = format('%L', list(announce.get(channel)()))\n irc.reply(feeds or 'I am currently not announcing any feeds.')", "def harvest_all():\n db = db_connect()\n dois = None\n get_commits()\n get_citations(db, dois)\n get_mentions(since_version=None)\n list_records(dois)", "def get_announcements(self, factory: 'AnnouncementFactory') -> 'AnnouncementCollection':\n collection = factory.get_announcement_collection(self.get_announcement_data_list())\n return collection", "def initialize_dsit_announcements():\n announcements_url = bot_settings['announcements']['url_DSIT']\n announcements = None\n try:\n logging.debug('Requesting announcements from DSIT announcement page')\n announcements = parse_dsit_announcements(announcements_url)\n logging.debug('Successfully parsed DSIT announcements')\n except requests.exceptions.RequestException:\n logging.error(f'Could not reach DSIT announcement page using {announcements_url}')\n\n return announcements", "def getEntertainments(self):\n entertainments = []\n\n s = requests.get(self.__data['links']['entertainments']['href'], headers=getHeaders())\n data = json.loads(s.content)\n\n for enter in data['entries']:\n try:\n entertainments.append(Entertainment(enter['links']['self']['href'].split('/')[-1].split('?')[0]))\n except:\n pass\n return entertainments", "def announce(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.tracker_announce\")\n\n return m.call()[-1]", "def retrieve_all_articles(client: Client) -> List[AirbyteRecordMessage]:\n all_records = client.client.data_object.get(class_name=\"Article\", )\n out = []\n for record in all_records.get(\"objects\"):\n props = 
record[\"properties\"]\n out.append(_article_record(\"Article\", props[\"title\"], props[\"wordCount\"]))\n out.sort(key=lambda x: x.record.data.get(\"title\"))\n return out", "def all(): \n res = requests.get(\"https://minehub.cz/api/posts/\")\n return res.json()", "def getAllNews(self) -> list:\n try:\n allNews = self.apiNews.getNews() + self.redditNews.getNews()\n log.debug('Aggregated responses from all supported third party APIs')\n return allNews\n except Exception as e:\n log.error('Newstore Exception:' + str(e))\n raise Exception(\"Newstore Exception: {}.\".format(str(e)))", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def parse_dsit_announcements(url):\n try:\n resp = requests.get(url)\n resp.raise_for_status() # Raising request exception if status code is not 200\n except requests.exceptions.RequestException as e:\n logging.error('Failed to reach DSIT announcements page')\n raise e\n\n announce_soup = BeautifulSoup(resp.text, features=\"html.parser\")\n articles = announce_soup.find_all('article')\n\n def parse_announcement(announcement):\n # Date parsing\n date_str = announcement.find('span', attrs={'class': 'updated'})\n\n # The actual time we get from the DSIT announcement page is in Athens timezone, however the provided one,\n # using %z, is UTC. We fix that using .replace\n date_time_obj = datetime.datetime.strptime(date_str.text, '%Y-%m-%dT%H:%M:%S%z') \\\n .replace(tzinfo=pytz.timezone('Europe/Athens'))\n\n # Dates are surprisingly not ordered since we extract the last update time and not posting time\n converted_datetime = date_time_obj.strftime(\"%d-%m-%Y %H:%M\")\n\n # Announcement link parsing\n href = announcement.find_all('a')[1]['href']\n\n # Title Parsing\n title = announcement.find_all('a')[1].contents[0]\n\n return title, converted_datetime, href, \"DSIT\"\n\n announcements = [Announcement(*parse_announcement(article)) for article in articles]\n\n return announcements", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def get(self, request):\n announcement_id = request.GET.get(\"id\")\n if announcement_id:\n try:\n announcement = Announcement.objects.get(id=announcement_id)\n return self.success(AnnouncementSerializer(announcement).data)\n except Announcement.DoesNotExist:\n return self.error(\"Announcement does not exist\")\n announcement = Announcement.objects.all().order_by(\"-create_time\")\n if request.GET.get(\"visible\") == \"true\":\n announcement = announcement.filter(visible=True)\n return self.success(self.paginate_data(request, announcement, AnnouncementSerializer))", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete an announcement by ID
def delete(self, announcementID): announcement = Announcement.query.filter_by(announcementID=announcementID) if not announcement.first(): return {'status': 'fail', 'message': 'No announcement with ID ' + str(announcementID) + ' exists'}, 404 announcement.delete() db.session.commit() return {'status': 'sucess', 'message': 'Announcement Deleted'}, 200
[ "def delete(self, _id):", "def delete(self,note_id):", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def deleteEntry(entry_id):", "def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def delete_object(self, id):\r\n self.request(id, post_args={\"method\": \"delete\"})", "def delete_by_id(self, subject_id: str) -> any:\n pass", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500", "def delete(id):\n elementFromDB = Advertisements().get_one_element(id)\n if elementFromDB is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n try:\n elements = Advertisements().delete_element(id)\n result = jsonify(elements)\n result.statut_code = 200\n return result\n except Exception as identifier:\n return abort(500, identifier)", "def delete_announcement_staff(request, announcement_id):\r\n # get announcement by its id.\r\n announcement = get_object_or_404(Announcement, pk=announcement_id)\r\n # get current staff.\r\n staff = get_object_or_404(Staff, user__id=request.user.id)\r\n data = dict()\r\n if announcement.staff == staff:\r\n if request.method == 'POST':\r\n # delete announcement.\r\n announcement.delete()\r\n data['form_is_valid'] = True \r\n # get all announcements. \r\n announcements = Announcement.objects.all()\r\n context = {'announcements': announcements, 'staff':staff}\r\n data['html_announcement_list'] = render_to_string('staff/includes/partial_announcement_list.html', context)\r\n else:\r\n context = {'announcement': announcement}\r\n data['html_form'] = render_to_string('staff/includes/partial_announcement_delete.html', context, request=request)\r\n return JsonResponse(data)", "def deleteById(self, id):\n pessoa = PessoaService()\n pessoa.deleteById(id)\n return", "def delete_alarm(alarm_id):", "def delete(identifier):", "def delete(self):\n args = {\"id\": self.id}\n owner = self.task.project.owner\n _perform_command(owner, \"note_delete\", args)", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def delete(self, id):\n return delete_alert(id)", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that converts a category name to a Python module name, e.g. rwgeneric to RwGenericYang
def get_module_name_from_log_category(log_category): words = log_category.split('-') words.append('yang') return ''.join(word.capitalize() for word in words)
[ "def normalize_module_name(layer_name):\n modules = layer_name.split('.')\n try:\n idx = modules.index('module')\n except ValueError:\n return layer_name\n del modules[idx]\n return '.'.join(modules)", "def get_prettified_module_name(module_name: str):\n module_name = module_name.lower().strip()\n if module_name == \"nlp\":\n return \"NLP\"\n else:\n return module_name.capitalize()", "def normalize_module_name(name):\n if name.endswith('.py'):\n name = name[:-3]\n return name.replace('/', '.')", "def get_module_name(platform, module_name):\n platform_desc = platforms[platform]\n if MODULE_CASE in platform_desc:\n func = platform_desc[MODULE_CASE]\n return func(module_name)\n return module_name", "def get_class_name_from_module_name(module_name):\n return module_name[0].upper() + (module_name[1:]).rstrip('1234567890')", "def pkg_path_2_category(pkg_path):\n if pkg_path.startswith('services/'):\n return 'services'\n elif pkg_path.startswith('examples/'):\n return 'examples'\n elif pkg_path.startswith('tests/'):\n return 'tests'\n elif pkg_path.startswith('tools/'):\n return 'tools'\n elif pkg_path == 'cli':\n return 'cli'\n elif pkg_path == 'dcap':\n return 'dcap'\n else:\n sys.stderr.write(\n '[Error]: Unknown category for package_path {}\\n'.format(pkg_path))\n sys.exit(-1)", "def get_module_short_name(klass):\n return klass.__module__.rsplit('.', 1)[-1]", "def to_type_name(name):\n return \"\".join(part[:1].upper() + part[1:] for part in name.split(\"_\"))", "def get_module_naming_scheme():\n return ConfigurationVariables()['module_naming_scheme']", "def sanitize_module_name(module_name):\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name", "def get_bot_module_name(bot_name):\n return '_'.join([part.lower() for part in _split_bot_name_to_words(bot_name)])", "def parse_category_label(label: str) -> str:\n return number_first_regex.sub(\n '_',\n space_regex.sub(\n '_',\n label.strip().lower().replace('*', '').replace('(', '').replace(\n ')', '').replace('.', '')))", "def new_cat_name(prefix=\"mittens\"):\n return \"%s%d\" % (prefix, random.randint(0, 9999))", "def create_importable_name(charm_name):\n return charm_name.replace(\"-\", \"_\")", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def _ros2_type_to_type_name(ros2_type):\n try:\n first_dot = ros2_type.__module__.find(\".\")\n return ros2_type[0:first_dot] + \"/\" + ros2_type.__name__\n except:\n # this shouldn't happen but try harder, don't crash the robot for something silly like this\n return str(ros2_type).replace(\"<class '\", \"\").replace(\"'>\", \"\")", "def _get_module_name(filename: str) -> str:\n return \".\".join(_get_relative(filename).split(os.path.sep)[2:]).replace(\".pyi\", \"\").replace(\".__init__\", \"\")", "def __create_classname(self, fullname):\n return PACKAGE_NAME + \".\" + fullname", "def cwl_name(name):\n name = name.replace('_', '-')\n\n return name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set Log category name to be used.
def set_category(self, category_name): try: module_name = get_module_name_from_log_category(category_name) log_yang_module = importlib.import_module('gi.repository.' + module_name) if not log_yang_module: logger.error("Module %s is not found to be added as log category for %s", module_name, category_name) print("Module %s is not found to be added as log category for %s", module_name, category_name) return for level in RwLogger.level_event_cls_map.values(): if not hasattr(log_yang_module, level): logger.error("Module %s does not have required log notification for %s", module_name, level) print("Module %s does not have required log notification for %s", module_name, level) return self._log_yang_module = log_yang_module self._log_category_name = category_name except Exception as e: logger.exception("Caught error %s when trying to set log category (%s)",repr(e), category_name)
[ "def set_category(self, category_name):\n if category_name == self._category:\n return\n\n try:\n module_name = get_module_name_from_log_category(category_name)\n\n gi.require_version(module_name, '1.0')\n log_yang_module = importlib.import_module('gi.repository.' + module_name)\n\n if not log_yang_module:\n logger.error(\"Module %s is not found to be added as log category for %s\", module_name, category_name)\n print(\"Module %s is not found to be added as log category for %s\", module_name, category_name)\n return\n for level in RwLogger.level_event_cls_map.values():\n if not hasattr(log_yang_module, level):\n logger.error(\"Module %s does not have required log notification for %s\", module_name, level)\n print(\"Module %s does not have required log notification for %s\", module_name, level)\n return\n self._log_yang_module = log_yang_module\n self._category = category_name\n\n except Exception as e:\n logger.exception(\"Caught error %s when trying to set log category (%s)\",\n repr(e), category_name)", "def category_name(self, category_name):\n\n self._category_name = category_name", "def rider_category_name(self, rider_category_name):\n\n self._rider_category_name = rider_category_name", "def incident_product_category_name(self, incident_product_category_name):\n self._incident_product_category_name = incident_product_category_name", "def set_scribe_category(category):\r\n LogOptions._SCRIBE_CATEGORY = category", "def category_name(self):\r\n return conf.lib.clang_getDiagnosticCategoryName(self.category_number)", "def category(self, category):\n self._category = category", "def category_name(self):\n return self.category.name", "def register_log_category(\n self, name: str, predicate: Callable[[str, str, Fmi2Status_T], bool]\n ):\n self._logger.register_new_category(name, predicate)", "def category_names(self, category_names):\n\n self._category_names = category_names", "def name(self, value):\r\n self.cat_constructor.Name = value", "def update_category(self, category: str) -> None:\n self.category = category", "def category_name(self):\n return conf.lib.clang_getDiagnosticCategoryText(self)", "def category_set_string(self, index, string):\r\n\t\tself.categories[index]['string'] = self.format_raw_string(string.lower())", "def set_logger_name(self, name) -> None:\n self.logger = logging.getLogger(LOGGER_PATH + \".\" + name)", "def rename(self, name):\n self._name = name\n self._logger = logging.getLogger(name)\n self._logger.setLevel(self._level)", "def get_category_name(self):\n cat = self.get_category()\n if cat is None:\n return \"Unknown\"\n return cat.name", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def category_prefix(self, prefix):\n if prefix[-1:] == \":\":\n prefix = prefix[:-1]\n self._cat_prefix = prefix" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the position of the piece in the tic-tac-toe grid according to where the player clicked.
def trouve_position(self, x, y): xpos = 0 ypos = 0 # xpos est la position en x dans la grille if (x > 200) and (x < (self.width - 400) / 4 + 200): xpos = 1 if (x > (self.width - 400) / 3 + 200) and (x < (self.width - 400) * 2 / 3 + 200): xpos = 2 if (x > (self.width - 400) * 2 / 3 + 200) and (x < self.width - 200): xpos = 3 # ypos est la position en y dans la grille if (y > 0) and (y < self.height / 4): ypos = 3 if (y > self.height / 4) and (y < self.height / 2): ypos = 2 if (y > self.height / 2) and (y < self.height * 3 / 4): ypos = 1 return xpos, ypos
[ "def posAnteior(self):\n # # BUG: NÃO TA FAZENDO MUDANÇA DE POSIÇÃO\n linha = self.posicao[0]\n coluna = self.posicao[1]\n Arena.ponto(linha, coluna)", "def position_of(self, p: P2) -> int:\n p0 = self.start_point # 直线段起点\n d = self.direct # 直线方向\n\n p0_t0_p: P2 = p - p0 # 点 p0 到 p\n k: float = d * p0_t0_p # 投影\n project: P2 = k * d # 投影点\n\n vertical_line: P2 = p0_t0_p - project # 点 p 的垂线方向\n\n # 垂线长度 0,说明点 p 在直线上\n if vertical_line == P2.zeros():\n return 0\n\n # 归一化\n vertical_line = vertical_line.normalize()\n right_hand: P2 = d.rotate(\n BaseUtils.angle_to_radian(-90)).normalize() # 右手侧\n\n if vertical_line == right_hand:\n return 1\n else:\n return -1", "def move_to_position2(self):", "def go_to_prison(self):\n self.coordinate_x = 56\n self.coordinate_y = 819 - 20 * self.player_number\n self.cell = 10", "def modpos(pos,L,move):\n pos += move\n if pos == L: #moved off right or bottom\n return(0)\n if pos == -1:#moved off top or left\n return(L-1)\n return(pos) #in the middle", "def move_to_position1(self):", "def get_main_position(self):", "def getPositionCG(self):\n return self.getOrigine().changeRef(refSol)", "def peut_placer(grille,bateau,position,direction):\n x = position[0]\n y = position[1]\n if y + bateau > 10 and x + bateau > 10 :\n return False\n if direction == 1: # test horizontalement\n if y + int(bateau) > 10:\n return False\n for i in range(int(bateau)): # 1 a 4\n if grille[x][y+i] != 0:\n return False\n return True\n else:\n if x + int(bateau) > 10:\n return False\n for i in range(int(bateau)):\n if grille[x+i,y] != 0:\n return False\n return True", "def posiciona_proxima(self, posicao):\n self.linha_inicial += self.altura_da_linha # incrementa a posição para montar na linha de baixo\n self.posicoes_montadas += [posicao] # adiciona o índice desta peça na lista de peças montadas\n if self.posicoes_montadas == self.posicoes_corretas:\n self.acertou() # invoca a ação acertou se montou nas posições corretas\n return 300, self.linha_inicial\n else:\n if len(self.posicoes_montadas) == 4: # se montou qutro peças incorretas reinicia o game\n [linha.zera() for linha in self.linhas] # volta as peças para o topo\n self.posicoes_montadas = [] # indica que nenhuma peça foi montada\n self.linha_inicial = 300 # inicia a altura de ontagem da primeira peça\n return 0, 0 # retorna uma posição inválida para sinalizar a peça\n return 300, self.linha_inicial", "def getCarteAt(self, pos):\r\n #A compléter\r", "def position_in_operon(self):\n if self.transcription_units:\n tu_lengths = [len(tu.location) for tu in self.transcription_units]\n longest_tu = self.transcription_units[int(np.argmax(tu_lengths))]\n if longest_tu.location.strand == 1:\n gene_starts = sorted([gene.location.start.position for gene in longest_tu.genes])\n this_gene_start = self.location.start.position\n else:\n gene_starts = sorted([gene.location.end.position for gene in longest_tu.genes])\n gene_starts.reverse()\n this_gene_start = self.location.end.position\n position = np.where(np.array(gene_starts) == this_gene_start)[0][0] + 1\n else:\n position = 1\n return position", "def GetPosition2(self):\n ...", "def placer_mur(self, joueur, position, orientation):\n\n posx = position[0]\n posy = position[1]\n modificationdirection = ''\n if self.infojeu['joueurs'][joueur-1]['murs'] == 0:\n raise QuoridorError\n elif joueur in (1, 2):\n if orientation == 'horizontal':\n modificationdirection = 'horizontaux'\n for _, value in enumerate(self.infojeu['murs']['horizontaux']):\n if value[0] == posx and value[1] == posy:\n raise 
QuoridorError\n elif (value[0] == posx+1 or value[0] == posx-1) and value[1] == posy:\n raise QuoridorError\n elif posx >= 9 or posx < 1:\n raise QuoridorError\n elif posy <= 1 or posy > 9:\n raise QuoridorError\n for i, value in enumerate(self.infojeu['murs']['verticaux']):\n if value[0] == posx+1 and value[1] == posy-1:\n raise QuoridorError\n elif orientation == 'vertical':\n modificationdirection = 'verticaux'\n for i, value in enumerate(self.infojeu['murs']['verticaux']):\n if value[0] == posx and value[1] == posy:\n raise QuoridorError\n elif (value[1] == posy-1 or value[1] == posy+1) and value[0] == posx:\n raise QuoridorError\n elif posy >= 9 or posy < 1:\n raise QuoridorError\n elif posx <= 1 or posx > 9:\n raise QuoridorError\n for i, value in enumerate(self.infojeu['murs']['horizontaux']):\n if value[0] == posx-1 and value[1] == posy+1:\n raise QuoridorError\n else:\n raise QuoridorError\n jfonction = [self.infojeu['joueurs'][0]['pos'], self.infojeu['joueurs'][1]['pos']]\n mhfonction = self.infojeu['murs']['horizontaux']\n mvfonction = self.infojeu['murs']['verticaux']\n graphe = construire_graphe(jfonction, mhfonction, mvfonction)\n self.infojeu['murs'][modificationdirection].append([posx, posy])\n if joueur == 1:\n if not nx.has_path(graphe, tuple(self.infojeu['joueurs'][joueur-1]['pos']), 'B1'):\n self.infojeu['murs'][modificationdirection].pop()\n raise QuoridorError\n else:\n if not nx.has_path(graphe, tuple(self.infojeu['joueurs'][joueur-1]['pos']), 'B2'):\n self.infojeu['murs'][modificationdirection].pop()\n raise QuoridorError\n self.infojeu['joueurs'][joueur-1]['murs'] -= 1", "def _goal_position(self, raw_data):\n return raw_data['goal_position_ego_n2']", "def movePos(self,p,intMove):\n return pos(p.pos.x-(intMove*self.intPlayer(p.color)),p.pos.y+self.intPlayer(p.color))", "def placerPions(grille):\n n=len(grille)\n for x in range(len(grille)):\n if x%2==0:#Place les fantassins et les cavaliers des colonnes paire\n grille[0][x][1]=\"F\"\n grille[1][x][1]=\"C\"\n grille[n-2][x][1]=\"F\"\n grille[n-1][x][1]=\"C\"\n else:#Place les fantassins et les cavaliers des colonnes impaire\n grille[0][x][1]=\"C\"\n grille[1][x][1]=\"F\"\n grille[n-2][x][1]=\"C\"\n grille[n-1][x][1]=\"F\"\n #Indique a quel joueur appartiennent les pièces.\n grille[0][x][2]=\"1\"\n grille[1][x][2]=\"1\"\n grille[n-2][x][2]=\"2\"\n grille[n-1][x][2]=\"2\"\n return grille", "def get_real_primer_position(self,this_primer):\n rev_compl=None\n position=this_primer['startpos']\n primer_sequence=this_primer['sequence']\n if not primer_sequence or primer_sequence=='':\n return\n if position<0:\n rev_compl=1\n position=len(self.parent.data['DNAseq'])-(abs(position)+len(primer_sequence))\n #\n # Reverse\n #\n primer_sequence=''\n for letter in this_primer['sequence']:\n primer_sequence=letter+primer_sequence\n #position=position+1\n return position", "def position_robot(self):\n x = 0\n y = 0\n while y < len(self.lignes_labyrinthe):\n if self.robot in self.lignes_labyrinthe[y]:\n while x < len(self.lignes_labyrinthe[y]):\n if self.robot in self.lignes_labyrinthe[y][x]:\n return y, x\n x += 1\n y += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``TextInputStyle`` instance values are all the expected value type.
def test__TextInputStyle__value(): for instance in TextInputStyle.INSTANCES.values(): vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE)
[ "def _isvalid(self, attr_values):\n attr_types = attrs(self.model)\n value_types = {a: v.__class__ for a, v in attr_values.items()}\n\n for attr, value_type in value_types.items():\n if value_type is not attr_types[attr]:\n msg = \"%s value should be type %s not %s\"\n raise TypeError(msg % (attr, attr_types[attr], value_type))", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def testValue(self, val):\n _a = self.__attr\n if _a == 'setAngle':\n _val = util.getFloat(val)\n elif _a == 'setAlignment':\n if not isinstance(val, int):\n raise TypeError, \"Invalid alignment type: \" + `type(val)`\n if (val != TextStyle.ALIGN_LEFT and\n val != TextStyle.ALIGN_CENTER and\n val != TextStyle.ALIGN_RIGHT):\n raise ValueError, \"Invalid alignment value: %d\" % val\n _val = val\n elif _a == 'setColor':\n if not isinstance(val, color.Color):\n raise TypeError, \"Invalid Color: \" + `type(val)`\n _val = val\n elif _a == 'setFamily':\n if not isinstance(val, types.StringTypes):\n raise TypeError, \"Invalid family type: \" + `type(val)`\n _val = val\n elif _a == 'setStyle':\n if not isinstance(val, int):\n raise TypeError, \"Invalid style type: \" + `type(val)`\n if (val != TextStyle.FONT_NORMAL and\n val != TextStyle.FONT_OBLIQUE and\n val != TextStyle.FONT_ITALIC):\n raise ValueError, \"Invalid style value: %d\" % val\n _val = val\n elif _a == 'setWeight':\n if not isinstance(val, int):\n raise TypeError, \"Invalid weight type: \" + `type(val)`\n if (val != TextStyle.WEIGHT_NORMAL and\n val != TextStyle.WEIGHT_LIGHT and\n val != TextStyle.WEIGHT_BOLD and\n val != TextStyle.WEIGHT_HEAVY):\n raise ValueError, \"Invalid weight value: %d\" % val\n _val = val\n elif _a == 'setSize':\n _val = util.get_float(val)\n if _val < 0.0:\n raise ValueError, \"Invalid size: %g\" % _val\n else:\n raise ValueError, \"Unexpected attribute: \" + _a\n return _val", "def test_types(self):\n values.String.validate('String value')\n\n for cls in (int, float, bool):\n with self.assertRaises(TypeError):\n values.String.validate(cls('1'))", "def is_text(self):\n return self.value_type in (str, unicode)", "def isStringStyle(self, style):\n return style in [QsciLexerJava.DoubleQuotedString,\n QsciLexerJava.SingleQuotedString,\n QsciLexerJava.UnclosedString,\n QsciLexerJava.VerbatimString]", "def test__validate_button_style__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_button_style(input_value)", "def isStringStyle(self, style):\n return style in [QsciLexerCSS.DoubleQuotedString,\n QsciLexerCSS.SingleQuotedString]", "def _check_value(self, key, value):\n attr_type = self['_allowed_keys'][key]\n if not isinstance(value, attr_type) \\\n and not (isinstance(value, type) and issubclass(value, attr_type)):\n raise TypeError('attribute \"%s\" only allows type \"%s\"'\n % (key, attr_type.__name__))", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def test__validate_button_style__0():\n for input_value, expected_output in (\n (ButtonStyle.red, ButtonStyle.red),\n (ButtonStyle.red.value, ButtonStyle.red)\n ):\n output = validate_button_style(input_value)\n vampytest.assert_eq(output, expected_output)", "def validate(self,value):\r\n return 
type(value) is self.datatype", "def check_value(self, iterable):\n allnumeric = True\n for item in iterable:\n if type(item) not in [int, float]:\n allnumeric = False\n return allnumeric", "def accepts(cls, value: Any) -> bool:\n try:\n cls.convert(value)\n return True\n except ValueError:\n return False", "def _validate_datatype(self, datatype):\n return datatype in VALID_DATATYPES", "def is_input(self):\n # https://html.spec.whatwg.org/multipage/forms.html#category-submit\n if self.style['appearance'] == 'auto' and self.element is not None:\n if self.element.tag in ('button', 'input', 'select', 'textarea'):\n return not isinstance(self, (LineBox, TextBox))\n return False", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def _check_dtype(self):\n\n # assert valid dtype\n if self.dtype not in PRIMITIVE_TYPES:\n raise ValueError(\"Type '{}' is invalid. Following types are \"\n \"allowed: {}\"\n .format(self.dtype, PRIMITIVE_TYPES.keys()))\n\n # assert valid dtypes for values\n allowed_types = PRIMITIVE_TYPES[self.dtype]\n\n for value in self.values:\n if not isinstance(value, allowed_types):\n raise TypeError(\"Column '{}' has invalud value '{}' with \"\n \"invalid type '{}'. Allowed types are: {}.\"\n .format(self.name,\n value,\n type(value),\n allowed_types))", "def check_inputs_shapes(inputs):\n valid = True\n shapes = []\n for _, input in inputs.items():\n shapes.append(tuple(input.get_shape().as_list()))\n shapes = list(set(shapes))\n if len(shapes) != 1:\n valid = False\n return valid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that example.com was in the dashboard.
def test_link_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, "example.com")
[ "def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)", "def test_dashboards_v2_show(self):\n pass", "def test_analytics_id(self):\n response = self.client.get(reverse('home'))\n self.assertContains(response, 'MyAwesomeAnalyticsCode')", "def test_dashboard_get_one_dashboard(self):\n pass", "def test_showing_dietitian_homepage(self):\n\n result = self.client.get(\"/dietitian/1\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Dietitian Dashboard\", result.data)\n\n result = self.client.get(\"/dietitian/2\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def test_search_dashboard_sharing(self):\n pass", "def test_dashboards_v2_link(self):\n pass", "def test_dashboard_view(self):\n target_url = url_for('dashboard.dashboard_panel')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_dashboard_view(self):\n target_url = url_for('home.dashboard')\n redirect_url = url_for('auth.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_dashboards_v2_request_access(self):\n pass", "def test_tenant_external_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.domain.domain)\n self.assertEqual(response.status_code, 200)", "def test_about_url_exists_at_desired_location(self):\r\n response = self.guest_client.get('/page/about/')\r\n self.assertEqual(response.status_code, 200)", "def test_tech_url_exists_at_desired_location(self):\n response = self.guest_client.get('/about/tech/')\n self.assertEqual(response.status_code, 200)", "def test_login_required_dashboard(self):\r\n response = self.client.get(reverse('dashboard'))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['Location'], 'http://testserver/accounts/login?next=/dashboard')", "def test_search_dashboard(self):\n pass", "def test_user_dashboard_not_logged(self):\n # Make the request\n response = self.client.get(self.dashboard_url)\n # Check that redirects to corresponding url\n self.assertRedirects(response,\n self.login_url+'?next='+self.dashboard_url)", "def test_search_dashboard_panel(self):\n pass", "def test_zuul_accessible(self):\n url = config.GATEWAY_URL + \"/zuul/\"\n resp = requests.get(\n url,\n cookies=dict(\n auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('<title>Zuul Status</title>' in resp.text)", "def test_showing_patient_homepage(self):\n\n result = self.client.get(\"/patient/1\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Patient Dashboard\", result.data)\n\n result = self.client.get(\"/patient/2\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the admin list found the User and Group admins
def test_admin_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, '<a href="/admin/auth/group/">Group</a>', html=True) self.assertContains(response, '<a href="/admin/auth/user/">User</a>', html=True)
[ "def list_admin() -> None:\n admin_users = list(User.objects(admin=True).scalar('email'))\n if admin_users:\n echo('Allowed admins are')\n for email in admin_users:\n echo('- %s' % email)\n else:\n echo('No admins found')\n\n users = list(User.objects(admin=False).scalar('email'))\n if users:\n echo('Rest of users are:')\n for email in users:\n echo('- %s' % email)", "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_filter_admins(self) -> None:\n\n # Register an additional non admin user\n self.register_user(\"user\", \"pass\", admin=False)\n\n # Query all users\n channel = self.make_request(\n \"GET\",\n f\"{self.url}\",\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n self.assertEqual(2, channel.json_body[\"total\"])\n\n # Query only admin users\n channel = self.make_request(\n \"GET\",\n f\"{self.url}?admins=true\",\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(1, channel.json_body[\"users\"][0][\"admin\"])\n\n # Query only non admin users\n channel = self.make_request(\n \"GET\",\n f\"{self.url}?admins=false\",\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertFalse(channel.json_body[\"users\"][0][\"admin\"])", "def test_admin_user_list(self):\n # No users\n rv = self.app.get('/admin/user')\n assert rv.status_code == 200\n\n # More than 0 users\n self.add_user(self.TESTUSER)\n rv = self.app.get('/admin/user', follow_redirects=True)\n assert self.TESTUSER['name'] in rv.data", "def test_01_admin_index(self):\r\n self.register()\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"There should be an index page for admin users and apps\"\r\n assert \"Settings\" in res.data, err_msg\r\n divs = ['featured-apps', 'users', 'categories', 'users-list']\r\n for div in divs:\r\n err_msg = \"There should be a button for managing %s\" % div\r\n assert dom.find(id=div) is not None, err_msg", "def test_get_admins_method(self):\n self.assertEqual(len(self.board.get_admins()), 1)", "def test_admin(self):\n assert(admin)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_admin_page_rights(self) -> None:\n\n self.get_html_response('/admin', expected_status_int=302)\n\n # Login as a non-admin.\n self.login(self.EDITOR_EMAIL)\n self.get_html_response('/admin', expected_status_int=401)\n self.logout()\n\n # Login as an admin.\n self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)\n self.get_html_response('/admin')\n self.logout()", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def user_is_admin(user):\n return user in admins", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist') ## generate user list page, check django admin docs\n response = self.client.get(url)\n\n self.assertContains(response, self.user.name)\n self.assertContains(response, self.user.email)", "def get_admins(self):\n 
admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "def admins(self):\n return User.objects.filter_by_role(role=Roles.GROUP_ADMIN, roles__group=self)", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def test_link_to_admin_when_logged_in(self):\n User.objects.create_user('test', 'dudarev+test@gmail.com', 'test')\n self.client.login(username='test', password='test')\n response = self.client.get(reverse('index'))\n self.assertTrue('(admin)' in response.content)", "def test_return_error_if_admin_trying_to_add_non_existing_users(self):\n self.client.credentials(\n HTTP_AUTHORIZATION='Token ' + self.admin_user.auth_token.key\n )\n\n payload = {'users': ['Samuel']}\n response = self.client.patch(\n reverse('api:group-detail', args=[self.admin_group.name]),\n data=payload, format='json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n response = self.client.put(\n reverse('api:group-detail', args=[self.admin_group.name]),\n data=payload, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def show_admins(self, data):\n admin_names = [self.api.people.get(i).displayName for i in self.admins.get_admins()]\n global_admin_names = [self.api.people.get(i).displayName for i in GLOBAL_ADMINS]\n global_admins_nice = '- '.join([(i + ' (global)\\n') for i in global_admin_names])\n admins_nice = '- '.join([(i + '\\n') for i in set(admin_names) - set(global_admin_names)])\n\n if global_admins_nice:\n admins_nice += ('\\n- ' if admins_nice else '') + global_admins_nice\n\n self.create_message(\n \"Admins for '\" + str(self.project.get_project()) + \"' are:\\n- \" + admins_nice,\n roomId=data['roomId']\n )", "def test_admin_user_info(self):\n self.add_user(self.TESTUSER)\n\n # Missing user\n rv = self.app.get('/admin/user/nobody', follow_redirects=True)\n assert b'Användaren existerar inte!' in rv.data\n\n # Existing user\n rv = self.app.get('/admin/user/%s' % self.TESTUSER['barcode'], follow_redirects=True)\n assert self.TESTUSER['name'] in rv.data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Backup the git refs.
def backup_ref(self): # Back ourselves up! backup_ref="refs/backups/{0}-{1}-{2}".format(self.ref_type, self.ref_name, int( time.time() )) command = ("git", "update-ref", backup_ref, self.old_sha1) process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[ "def backup_repo(self):\n self.log.info('Initialized backup of repo.')\n try:\n dest = os.path.join(self.backup_dir, '%s.git' % (self.project))\n local('rsync -avz /var/git/projects/%s/ %s' % (self.project, dest))\n except:\n self.log.exception('Backing up the repo was unsuccessful.')\n raise\n else:\n self.log.info('Backup of repo successful.')", "def _stash_and_checkout(repo, version):\n repo.git.stash()\n repo.git.checkout(version)\n repo.git.clean(\"-df\")", "def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)", "def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')", "def flush_history(self):\n shred_dir(os_join(self.directory, '.git'))\n self.git.init()\n self.git.add([self.encrypted_dir, '.gitignore'])\n self.git.commit('Clean git History')", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)", "def do_backup_w_args():\n backup()", "def backup_db():\n try:\n subprocess.check_output(['ls dump'], shell=True);\n subprocess.check_call(['rm -rf dump*'], shell=True)\n except subprocess.CalledProcessError:\n pass\n subprocess.check_call(['mongodump'])\n subprocess.check_call(['cp', '-rf', 'dump', 'dump.bak'])\n subprocess.check_output(['ls dump.bak'], shell=True);", "def _save_state(self):\n with open(os.path.join(self._workdir, '.git', 'drover'), 'wb') as f:\n cPickle.dump(self, f)", "def __makeBackup(self):\n pass #FIXME!!!", "def _cleanup(self):\n self._get_repo().git.reset('--hard', 'HEAD')", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def backup(self):\n # s=colorize(value.strftime(\"%Y-%m-%d\"),color)\n prefix=DateTime.now().strftime(\"%Y%m%d_%H%M%S\")+\"_\"\n p_backup = self._config._path_backup\n f_todo=self._config._file_todo\n f_todo_backup=os.path.join(p_backup, prefix+self._config.todo_backup)\n f_todo_archive=self._config._file_archive\n f_todo_archive_backup=os.path.join(p_backup, prefix+self._config.archive_backup)\n\n logger.debug(\"### Archiving\")\n logger.debug(\" -%s\",f_todo)\n logger.debug(\" %s\",f_todo_backup)\n logger.debug(\" -%s\",f_todo_archive)\n logger.debug(\" %s\",f_todo_archive_backup)\n shutil.copy(src=f_todo,dst=f_todo_backup)\n 
shutil.copy(src=f_todo_archive,dst=f_todo_archive_backup)", "def backuptoluna():\n # environment settings\n env = environ.Env()\n root = env('LEUKDC_BACKUPDB_DIR') + datetime.now().strftime(\"/%Y/%m/%d/\")\n db, commit = \"db\", \"commit\"\n\n # ssh setting\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n env('LEUKDC_HOST'),\n username=env('LEUKDC_USER'),\n password=env('LEUKDC_PASSWORD'),\n key_filename=env('LEUKDC_SSHKEY'),\n )\n\n ssh.exec_command(\"mkdir -p \" + root)\n sleep(1) # Adds sleep to wait for ssh creation of directory\n\n # dump files\n with SCPClient(ssh.get_transport()) as scp:\n scp.put(db, root)\n scp.put(commit, root)", "def dump_git_database(db, client, rc):\n dbdir = dbdirname(db, rc)\n # dump all of the data\n to_add = client.dump_database(db)\n # update the repo\n cmd = ['git', 'add'] + to_add\n subprocess.check_call(cmd, cwd=dbdir)\n cmd = ['git', 'commit', '-m', 'regolith auto-commit']\n try:\n subprocess.check_call(cmd, cwd=dbdir)\n except subprocess.CalledProcessError:\n warn('Could not git commit to ' + dbdir, RuntimeWarning)\n return\n cmd = ['git', 'push']\n try:\n subprocess.check_call(cmd, cwd=dbdir)\n except subprocess.CalledProcessError:\n warn('Could not git push from ' + dbdir, RuntimeWarning)\n return", "def flush_gitclone():\n if os.path.exists(DL_DIR):\n shutil.rmtree(DL_DIR, onerror=del_rw)", "def refresh_git_backup(repository_record, job_result, delete=False): # pylint: disable=unused-argument\n job_result.log(\n \"Successfully Pulled git repo\",\n level_choice=LogLevelChoices.LOG_SUCCESS,\n )", "def backup_current_config():\n LOGGER.debug(\"Backing up current config\")\n\n backup(VIMRC)\n backup(VIMDIR)\n #backup(BASHRC)\n #backup(ZSHRC)\n backup(PYLINTRC)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the audit failed (True) or passed (False).
def audit_failed(self):
        return self.__failed
[ "def is_failed(self):\n return self.status.value and self.status.value.upper() == \"FAILED\"", "def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL", "def is_fail(self):\n return self.command == CommandResponse.fail.value", "def failed(oc:Outcome[L, R]) -> bool:\n if isinstance(oc, Failure):\n return True\n else:\n return False", "def is_failing(self):\n return self.current_state == self.States.FAILED", "def failed(self) -> bool:\n return not self.ok", "def failed(seg):\r\n return seg._marked is False and seg._uploaded is False", "def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False", "def isFailure(code):\n if code == StatusCode.FAILED:\n return True\n return False", "def indicate_failure(self):\n pass", "def passed(self):\n if self.result == RESULT_PASS:\n return True\n\n return False", "def is_failed_user_data_retrieval(self):\n return self._tag == 'failed_user_data_retrieval'", "def success(self):\n return self.retcode == 0", "def failed_roboscript(self) -> bool:\n return pulumi.get(self, \"failed_roboscript\")", "def failed_assert(self):\n return self._failed_assert", "def zero_failures(self) -> bool:\n return abs(self.failurerate) < 1e-7", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or (self._num_script_errors() > 0)", "def has_errored(edit_context_entity):\n return _get_context(edit_context_entity).errored" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Audit the commit for proper end-of-line characters. The UNIX-type EOL is the only allowed EOL character.
def audit_eol(self): # Regex's.... re_commit = re.compile("^\xff(.+)\xff$") re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$") blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$") # Bool to allow special files such as vcards to bypass the check eol_allowed = False # Do EOL audit! process = get_change_diff( self.repository, ["-p"] ) for line in process.stdout: commit_change = re.match( re_commit, line ) if commit_change: commit = commit_change.group(1) continue file_change = re.match( re_filename, line ) if file_change: filename = file_change.group(2) eol_violation = False eol_allowed = False # Check if it's an allowed mimetype # First - check with the mimetypes system, to see if it can tell guessed_type, _ = mimetypes.guess_type(filename) if guessed_type in self.ALLOWED_EOL_MIMETYPES: eol_allowed = True continue # Second check: by file extension # NOTE: This uses the FIRST dot as extension splitted_filename = filename.split(os.extsep) # Check if there's an extension or not # NOTE This assumes that files use dots for extensions only! if len(splitted_filename) > 1: extension = splitted_filename[1] if extension in self.ALLOWED_EOL_EXTENSIONS: eol_allowed = True continue # Unless they added it, ignore it if not line.startswith("+"): continue if re.search( blocked_eol, line ) and not eol_violation: # Is this an allowed filename? if eol_allowed: continue # Failure has been found... handle it eol_violation = True self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
[ "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = self.commit\n else:\n col = self.columns[i]\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n seen_this = True\n self.buf += '*'\n chars_written += 1\n\n if self.num_parents > 2:\n chars_written += self._draw_octopus_merge()\n elif seen_this and self.num_parents > 2:\n self._write_column(col, '\\\\')\n chars_written += 1\n elif seen_this and self.num_parents == 2:\n # This is a 2-way merge commit. There is no\n # GraphState.PRE_COMMIT stage for 2-way merges, so this is the\n # first line of output for this commit. Check to see what the\n # previous line of output was.\n #\n # If it was GraphState.POST_MERGE, the branch line coming into\n # this commit may have been '\\', and not '|' or '/'. If so,\n # output the branch line as '\\' on this line, instead of '|'.\n # This makes the output look nicer.\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n if self.num_parents > 1:\n self._update_state(GraphState.POST_MERGE)\n elif self._is_mapping_correct():\n self._update_state(GraphState.PADDING)\n else:\n self._update_state(GraphState.COLLAPSING)", "def _detect_eol(self):\n if hasattr(self, \"_notify\") and self._notify:\n raise NumatoGpioError(\n \"Can't reliably detect the end-of-line-sequence \"\n \"as notifications are already enabled.\")\n\n # initial assumption required for basic operation of the reader thread\n self._eol = \"\\r\\n\"\n\n with self._rw_lock:\n self._write(\"{}\\r\".format(\"id get\").encode())\n response = self._read_until(\">\")\n eol = response[-3:-1]\n if eol[0] not in [\"\\r\", \"\\n\"]:\n eol = eol[1]\n self._eol = eol", "def ensure_eol(self):\n if self.body and self.body[-1][-1] != '\\n':\n self.body.append('\\n')", "def eat_EOL(self):\n # print(\"Start eating EOL\")\n self.eat(EOL)\n while self.current_token.type == EOL:\n self.eat(EOL)\n # print(\"Stop eating EOL\")", "def escape_eol_chars(options):\n pass", "def eol_at_eof(line):\n if len(line) == 0 or line[-1] != '\\n':\n return ERR_NO_EOL_AT_EOF\n else:\n return None", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_dos_eol():\n import figleaf, figleaf.annotate_html\n \n figleaf.start()\n execfile(os.path.join(thisdir, 'tst_dos_eol.py'))\n figleaf.stop()\n\n coverage = figleaf.get_data().gather_files()\n\n tmpdir = tempfile.mkdtemp('.figleaf')\n\n try:\n figleaf.annotate_html.report_as_html(coverage, tmpdir, [], {})\n finally:\n files = glob.glob('%s/*' % (tmpdir,))\n for f in files:\n os.unlink(f)\n os.rmdir(tmpdir)", "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def get_EOL(file):\n f=open(file,'r')\n firstline=f.read(350)\n EOL=\"\"\n for k in range(350):\n if firstline[k:k+2] == \"\\r\\n\":\n print file, ' appears to be a dos file'\n EOL='\\r\\n'\n break\n if EOL==\"\":\n for k in range(350):\n if firstline[k] == \"\\r\":\n print file, ' appears to be a mac file'\n EOL='\\r'\n if EOL==\"\":\n print file, \" appears to be a unix file\"\n EOL='\\n'\n f.close()\n return EOL", "def line_endings(self):\n pass", "def convert_line_endings():\n files = []\n for ext in [\n \".py\",\n \".sh\",\n \"Dockerfile\",\n \".txt\",\n \".csv\",\n \".mhd\",\n \".gitignore\",\n ]:\n files.extend(Path(\".\").glob(f\"**/*{ext}\"))\n\n for file in files:\n with open(str(file), \"rb\") as f:\n lines = f.read()\n\n lines = lines.replace(EOL_WIN, EOL_UNIX).replace(EOL_MAC, EOL_UNIX)\n\n with open(str(file), \"wb\") as f:\n f.write(lines)", "def 
has_crlf_line_endings():\n process = subprocess.Popen(\n \"git ls-files --eol\",\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n text=True,\n shell=True,\n )\n\n stdout, _ = process.communicate()\n dos_files: list[str] = []\n\n if process.returncode != 0:\n return dos_files\n\n crlf_re = re.compile(\"crlf|mixed\")\n assert process.stdout\n for line in iter(stdout.splitlines()):\n # A typical output of git ls-files --eol looks like below\n # i/lf w/lf attr/ vhost/Cargo.toml\n fields = line.split()\n if fields and crlf_re.search(fields[0] + fields[1]):\n dos_files.append(fields[3] + \"\\n\")\n\n return dos_files", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def _check_last_character(line_index, input_line, code_character):\n global _total_lines_of_code\n if input_line.endswith(code_character):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def GetEOLChar(self):\n m_id = self.GetEOLMode()\n if m_id == wx.stc.STC_EOL_CR:\n return u'\\r'\n elif m_id == wx.stc.STC_EOL_CRLF:\n return u'\\r\\n'\n else:\n return u'\\n'", "def get_commit_log(self, committish):\n\n str = self._getoutput(\"cat-file commit\", committish)\n return str[str.index('\\n\\n') + 2:]", "def _make_commit_line(hook, j, commit):\n l = []\n\n config = hook.config or {}\n show_branch = config.get('show_branch', True)\n show_raw_author = config.get('show_raw_author', False)\n\n l.append('{RESET}[{BLUE}{0}{RESET}]'.format(\n j['repository']['name'],\n **HookService.colors\n ))\n if show_branch and commit['branch']:\n l.append(commit['branch'])\n\n l.append('{LIGHT_CYAN}{0}{RESET}'.format(\n commit['raw_author'] if show_raw_author else commit['author'],\n **HookService.colors\n ))\n l.append('{PINK}{0}{RESET}'.format(\n commit['node'],\n **HookService.colors\n ))\n l.append(commit['message'])\n\n return ' '.join(l)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Audit the file names in the commit.
def audit_filename(self): for commit in self.repository.commits.values(): for filename in commit.files_changed: if commit.files_changed[ filename ]["change"] not in ["A","R","C"]: continue for restriction in self.filename_limits: if re.search(restriction, filename): self.__log_failure(commit.sha1, "Invalid filename: " + filename)
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def commit_names(self, commit):\n return []", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "async def audit(self, ctx):", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def commit (files):\n\n version = get_tag(comp_versions, 'ACE')\n root_path = get_path()\n files = [i[len(root_path):] if i.startswith(root_path) else i for i in files]\n\n print (\"Committing the following files for \" + version + ':', \" \".join (files))\n\n if opts.take_action:\n for file in files:\n print (\"Adding file \" + file + \" to commit\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git add \" + file)\n\n ex (\"cd $DOC_ROOT/ACE_TAO && git commit -m\\\"\" + version + \"\\\"\")", "def run_audit(self):\n for each_description in self.descriptor_files:\n schema = StateSchema(**each_description)\n\n metadata = schema.metadata\n descriptors = schema.descriptors\n\n Logger.log_info(\n f'Auditing {metadata.archive} in {metadata.stateLegalName} from {metadata.repoName}.'\n )\n\n try:\n # Construct paths\n archive_path = (\n self.mggg_states_dir + \"/\" + metadata.repoName + \"/\" + metadata.archive\n )\n file_path = self.expand_zipfile(archive_path) + metadata.fileName\n\n # Find column names\n total_population_col = descriptors.totalPopulation\n county_fips_col = descriptors.countyFIPS\n\n # Import and read shapefiles\n # DEPRECATED to make stuff more abstractable\n # if county_fips_col:\n # shapefile = gdutils.extract.read_file(\n # file_path, column=county_fips_col\n # )\n # else:\n shapefile = gdutils.extract.read_file(file_path)\n\n\n # Hard checks\n # TODO: get these to automatically load\n self.errors += checks.TotalPopulationCheck(schema, shapefile, self.scratch_dir).audit()\n self.errors += checks.CountyTotalPopulationCheck(schema, shapefile, self.scratch_dir).audit()\n self.errors += checks.FromGraphGerrychainCheck(schema, shapefile, self.scratch_dir).audit()\n\n # Soft checks\n checks.DataExistenceCheck(schema, shapefile, self.scratch_dir).audit()\n\n except KeyboardInterrupt:\n Logger.log_info(\n f'Captured KeyboardInterrupt! 
Skipping {metadata[\"archive\"]} in {each_description[\"metadata\"][\"stateLegalName\"]} from {each_description[\"metadata\"][\"repoName\"]}!'\n )\n pass", "def changed_files(self):\n commits = ['-r {}'.format(c) for c in self.commits]\n command = [self.vcs, 'diff', '--stat'] + commits\n result = _execute(' '.join(command))\n lines = result.strip().split('\\n')[:-1]\n files = [\n line.split('|')[0].strip()\n for line in lines\n ]\n return filter(self.filter_file, files)", "def saveStatResults(self, changes, file_stats):\n\n # commit_obj = rpc.RpcProxy('software_dev.commit')\n fchange_obj = rpc.RpcProxy('software_dev.filechange')\n \n commit_ids = []\n for chg in changes:\n if not chg.number:\n continue\n commit_ids.append(chg.number)\n \n while len(commit_ids) and len(file_stats):\n cid = commit_ids.pop() # so, we attribute the stats to the\n # last commit that matches their files\n fc_ids = fchange_obj.search([('commit_id','=', cid)])\n fcres = fchange_obj.read(fc_ids, ['filename'])\n # We read all the filenames that belong to the commit and\n # then try to see if we have any stats for them.\n if not fcres:\n continue\n for fcd in fcres:\n fcstat = file_stats.pop(fcd['filename'], False)\n if not fcstat:\n continue\n # now, we have a filechange.id and stats\n fchange_obj.write(fcd['id'], fcstat)", "def audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... 
handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def git_changed_files(revision=None):\n if revision:\n files = subprocess.check_output(\n [GIT, \"diff\", \"--name-only\", revision]).decode('utf-8')\n return [filename for filename in files.split('\\n')\n if filename]\n else:\n files = subprocess.check_output(\n [GIT, \"diff\", \"--name-only\"]).decode('utf-8')\n cached_files = subprocess.check_output(\n [GIT, \"diff\", \"--name-only\", \"--cached\"]).decode('utf-8')\n return [filename for filename\n in set(files.split('\\n')) | set(cached_files.split('\\n'))\n if filename]", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def test_lint_staged_msg_filename(self):\n # Create a commit first, before we stage changes. This ensures the repo is properly initialized.\n self.create_simple_commit(\"Sïmple title.\\n\")\n\n # Add some files, stage them: they should show up in the debug output as changed file\n filename1 = self.create_file(self.tmp_git_repo)\n git(\"add\", filename1, _cwd=self.tmp_git_repo)\n filename2 = self.create_file(self.tmp_git_repo)\n git(\"add\", filename2, _cwd=self.tmp_git_repo)\n\n tmp_commit_msg_file = self.create_tmpfile(\"WIP: from fïle test.\")\n\n output = gitlint(\n \"--msg-filename\",\n tmp_commit_msg_file,\n \"--staged\",\n \"--debug\",\n _cwd=self.tmp_git_repo,\n _tty_in=False,\n _err_to_out=True,\n _ok_code=[3],\n )\n\n # Determine variable parts of expected output\n expected_kwargs = self.get_debug_vars_last_commit()\n filenames = sorted([filename1, filename2])\n expected_kwargs.update(\n {\n \"changed_files\": filenames,\n \"changed_files_stats\": (\n f\"{filenames[0]}: 0 additions, 0 deletions\\n {filenames[1]}: 0 additions, 0 deletions\"\n ),\n }\n )\n\n # It's not really possible to determine the \"Date: ...\" line that is part of the debug output as this date\n # is not taken from git but instead generated by gitlint itself. 
As a workaround, we extract the date from the\n # gitlint output using a regex, parse the date to ensure the format is correct, and then pass that as an\n # expected variable.\n matches = re.search(r\"^Date:\\s+(.*)\", str(output), re.MULTILINE)\n if matches:\n expected_date = arrow.get(str(matches.group(1)), \"YYYY-MM-DD HH:mm:ss Z\").format(\"YYYY-MM-DD HH:mm:ss Z\")\n expected_kwargs[\"staged_date\"] = expected_date\n\n expected = self.get_expected(\"test_commits/test_lint_staged_msg_filename_1\", expected_kwargs)\n self.assertEqualStdout(output, expected)\n self.assertEqual(output.exit_code, 3)", "def get_changes_filename(self):\n return os.path.join(settings.DOKUWIKI_META_DIR, \"{}.changes\".format(self.namespace.replace(':', '/')))", "def getFileDiff(self, filename, revisionOld, revisionNew):", "def collect_changes(repo, commit, repo_path):\n\n changes = []\n\n for parent in commit.parents:\n for d in parent.diff(commit, create_patch=True):\n diff = d.diff.decode('utf-8')\n\n for change in diff.split('\\n'):\n if change.startswith('@@'):\n info = [x for x in change.split() if x != '@@']\n # Parse the beginning line numbers of the unified diff.\n old_line_number = int(info[0][1:].split(',')[0])\n new_line_number = int(info[1][1:].split(',')[0])\n elif change.startswith('-'):\n changes.append(Change('del', change[1:], old_line_number, SourceFileSnapshot(d.a_path, repo, commit, repo_path)))\n old_line_number += 1\n elif change.startswith('+'):\n changes.append(Change('add', change[1:], new_line_number, SourceFileSnapshot(d.b_path, repo, commit, repo_path)))\n new_line_number += 1\n elif change == '\\\\ No newline at end of file':\n continue\n else:\n old_line_number += 1\n new_line_number += 1\n\n return changes", "def revisionist_commit_history():\n return '\"{}\" --{}'.format(whatthecommit(), celebrity_name())", "def new_or_changed_files(self, commit: Commit) -> Set:\n diff = GitDiff(self._commit.diff(commit))\n return {change.b_path for change in diff.new_file_iter()}", "def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }