query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (19 to 20 items)
metadata: dict
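Each row pairs a `query` (a natural-language description, usually a docstring) with a positive `document` (the function it describes), a list of hard `negatives`, and a `metadata` dict naming the training objective. As a minimal sketch of how such rows might be consumed — assuming the data ships in the Hugging Face `datasets` format and using a hypothetical placeholder for the dataset identifier — the triplets could be iterated like this:

# Minimal sketch: the dataset path below is a hypothetical placeholder, and the
# Hugging Face `datasets` format is assumed rather than confirmed.
from datasets import load_dataset

rows = load_dataset("your-org/code-retrieval-triplets", split="train")  # placeholder identifier

for row in rows.select(range(3)):
    query = row["query"]          # natural-language / docstring query
    positive = row["document"]    # function body that answers the query
    negatives = row["negatives"]  # 19-20 hard-negative function bodies
    objective = row["metadata"]["objective"]  # {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}
    print(f"{query[:60]!r} -> 1 positive, {len(negatives)} negatives")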
Find a specific event by `type`, `start_index` and `data`. When matching the event `data`, it assumes `data` is a JSON-encoded dictionary, and returns the event if `kwargs` is a subset of the dictionary decoded from the event's `data` field.
async def find_event(self, event_type: str, start_index: int = 0, **kwargs: Any) -> Optional[Event]:
    events = await self.events(start_index)
    events = [e for e in events if e.type == event_type]
    for e in events:
        if _match(json.loads(e.data), **kwargs):
            return e
[ "def find_event(self,resource,events,start,end):\n tpast = end + datetime.timedelta(0, 1) #after end\n t = tpast\n for log_time in events:\n # need to abstract in_sync comparison, should the events be dicts or\n # Resource objects?\n if (log_time>=start and log_time<=end and log_time<t and\n resource['uri']==events[log_time]['uri'] and\n ( resource['md5']==events[log_time]['md5'] or\n ( resource['changetype']=='DELETED' and events[log_time]['changetype']=='DELETED')) ):\n t=log_time\n return( None if t==tpast else t )", "def find(self, data_type: str, resource_attributes=None, metadata_attributes=None, start_token: str = None,\n limit: int = None,\n consistent_read: bool = None):\n if resource_attributes is not None and metadata_attributes is not None:\n raise InvalidArgumentsException(\"Provide Resource or Metadata attributes to search, but not both\")\n\n if limit is not None and not isinstance(limit, int):\n raise InvalidArgumentsException(\"Limit must be an Integer\")\n\n if consistent_read is not None and not isinstance(consistent_read, bool):\n raise InvalidArgumentsException(\"Consistent Read must be a Boolean\")\n\n search_request = {}\n if resource_attributes is not None and not isinstance(resource_attributes, dict):\n raise InvalidArgumentsException(\"Resource Attributes must be a Dictionary\")\n else:\n search_request[params.RESOURCE] = resource_attributes\n\n if metadata_attributes is not None and not isinstance(metadata_attributes, dict):\n raise InvalidArgumentsException(\"Metadata Attributes must be a Dictionary\")\n else:\n search_request[params.METADATA] = metadata_attributes\n\n if start_token is not None:\n search_request[params.EXCLUSIVE_START_KEY] = start_token\n\n if limit is not None:\n search_request[params.QUERY_PARAM_LIMIT] = limit\n\n if consistent_read is not None and consistent_read is True:\n search_request[params.QUERY_PARAM_CONSISTENT] = \"True\"\n\n # return POST /find\n return self._handle_response(\n self._http_handler.post(data_type=data_type, path=\"find\", post_body=search_request))", "def get_event_id(data_dict, start_time):\n if start_time in data_dict.keys():\n return data_dict[start_time]['id']", "def get_events(data):\n query_params = data.GET.dict()\n if not query_params:\n\n # If no payload is passed to the request, simply fetch future approved events\n start_date = datetime.now(timezone(TIMEZONE))\n\n # TODO: When the user first visits the homepage, all events occurring\n # in the week are fetched. 
Should this be changed instead to display\n # only events for the current day?\n end_date = datetime.now(timezone(TIMEZONE)) + timedelta(days=7)\n\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__range=(start_date, end_date))\\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))\n\n if 'isMonthly' in query_params and query_params['isMonthly'] == 'true':\n # Fetch events for the whole month\n\n month = int(query_params['month'])\n\n # TODO: Ensure that timezone differences are properly accounted for\n # when using the `__month` filter\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__month=month)\\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))\n\n else:\n # Fetch events for a selected date\n day = query_params['day']\n month = query_params['month']\n year = query_params['year']\n start_date = datetime.strptime(f\"{year}-{month}-{day} 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\n end_date = datetime.strptime(f\"{year}-{month}-{day} 23:59:59\", \"%Y-%m-%d %H:%M:%S\")\n\n current_timezone = timezone(TIMEZONE)\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__range=(current_timezone.localize(start_date),\n current_timezone.localize(end_date))) \\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))", "def search():\n #get the name given\n name = request.args.get('q')\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n if name:\n found_events = Events.get_events_by_name(name, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events matching the given name\"}), 404\n return jsonify({\"message\" : \"can not search events, provide event name\"}), 400", "def get_events(data):\n\n return data[\"events\"]", "def events_filter():\n #get the incoming parameters\n location = request.args.get(\"location\")\n category = request.args.get(\"category\")\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n #check which parameter was given and use it to query the database\n if location and category:\n #if both location and category have been given,filter by both\n found_events = Events.filter_events(location, category, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more {} events in {}\".format(category, location)}), 404\n elif location:\n found_events = Events.get_events_by_location(location, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events in {}\".format(location)}), 404\n elif category:\n found_events = Events.get_events_by_category(category, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more {} 
events\".format(category)}), 404\n else:\n return jsonify({\"message\" : \"can not search events with the given parameter\"}), 400", "def test_08_api_can_get_one_event(self):\n sample_event_id = 'kulke:44518'\n response = self.app.get('/api/events/%s' % sample_event_id, headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['event']['id'], sample_event_id)\n self.assertEqual(data['event']['user'], username)", "def events(request):\n try:\n if request.method == 'GET':\n events_list = Events.retrieve_all()\n if events_list is not []: # not empty list\n node_id = request.GET.get('node_id', '')\n user_id = request.GET.get('user_id', '')\n status = request.GET.get('status', '')\n\n if status is not '' and status not in data_checker.VALID_EVENT_STATUS_LIST:\n raise ValueError('Status ' + status + ' is not valid')\n\n node_search = node_id is not ''\n user_search = user_id is not ''\n status_search = status is not ''\n\n events_search_list = []\n\n if node_search or user_search or status_search: # has parameters to search\n if node_search and user_search and status_search: # search by node, user and status\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and user_search: # search by node and user\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id:\n events_search_list.append(event)\n\n elif user_search and status_search: # search by user and status\n for event in events_list:\n if event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and status_search: # search by node and status\n for event in events_list:\n if event['node_id'] == node_id and event['status'] == status:\n events_search_list.append(event)\n\n elif user_search: # search only by user\n for event in events_list:\n if event['user_id'] == user_id:\n events_search_list.append(event)\n\n elif node_search: # search only by node\n for event in events_list:\n if event['node_id'] == node_id:\n events_search_list.append(event)\n\n elif status_search: # search only by status\n for event in events_list:\n if event['status'] == status:\n events_search_list.append(event)\n\n resp = {\n 'success': 'true',\n 'data': events_search_list\n }\n\n else: # all without parameters\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n else:\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n data_checker.check_event(data, request.method)\n\n created_event_key = Events.create(data)\n\n if created_event_key:\n print 'create event successful'\n if 'user_id' not in data:\n resp = {\n 'success': 'true',\n 'data': created_event_key\n }\n else:\n all_events_list = Events.retrieve_all()\n resp_events = []\n for event in all_events_list:\n if event['user_id'] == data['user_id'] and event['status'] == data_checker.EVENT_UNREAD:\n resp_events.append(event)\n\n resp = {\n 'success': 'true',\n 'data': resp_events\n }\n else:\n raise RuntimeError('Orchestrate service temporarily unavailable')\n else:\n raise NotImplementedError('Only GET, POST methods are allowed')\n\n return JSONResponse(resp)\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n\n return JSONResponse(err)", "def find_venue_events(venue_id, page):\n\n if 
session['startdate']:\n start_date = session['startdate']\n else:\n start_date = None\n\n if session['enddate']:\n end_date = session['enddate']\n else:\n end_date = None\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'venue.id': venue_id,\n 'datetime_local.gte': start_date,\n 'datetime_local.lte': end_date,\n 'type': 'concert',\n 'per_page': 20,\n 'page': page}\n\n response = requests.get(SG_URL + 'events', params=params)\n\n return response.json()", "def test_get_event_json(self, mock_request_get):\n # We want two possible responses, first a good, 200 response, and\n # then a 404 response (a response that doesn't provide JSON). We\n # need to make sure we're handling the ValueError (JSONDecodeError).\n mock_good_response = mock.MagicMock()\n mock_good_response.status_code = 200\n mock_good_response.json.return_value = {'some': 'json'}\n\n mock_bad_response = mock.MagicMock()\n mock_bad_response.status_code = 404\n mock_bad_response.json.side_effect = ValueError()\n\n mock_request_get.side_effect = [\n mock_good_response,\n mock_bad_response\n ]\n\n EVENTICS_CONFIG['EVENT_SOURCE'] = 'http://localhost:9200/event/<event_slug>/'\n\n source_json, source_status = get_event_json('myevent')\n self.assertEqual(source_status, 200)\n mock_request_get.assert_called_with('http://localhost:9200/event/myevent/')\n\n source_json, source_status = get_event_json('myevent')\n self.assertEqual(source_status, 404)\n self.assertEqual(source_json, {})", "def from_json(cls, json_data):\n try:\n data = json.loads(json_data)\n except:\n logger.debug('No valid JSON data.')\n return None\n try:\n events_type = data.get('events_type')\n counter = data.get('counter')\n events = [cls._get_event(event=e) for e in data.get('events', [])]\n obj = cls(events_type=events_type, counter=counter, events=events)\n except Exception as e:\n logger.debug('Not a valid LogEvents object: {0}'.format(repr(e)))\n obj = None\n return obj", "def _params_check(record_type, version, **kwargs):\n if record_type == STATUS_CHANGES and version >= Version._040_() and \"event_time\" not in kwargs:\n raise TypeError(\"The 'event_time' query parameter is required for status_changes requests.\")\n\n elif record_type == TRIPS and version >= Version._040_() and \"end_time\" not in kwargs:\n raise TypeError(\"The 'end_time' query parameter is required for trips requests.\")\n\n elif record_type == EVENTS:\n if \"start_time\" not in kwargs and \"end_time\" not in kwargs:\n raise TypeError(\"The 'start_time' and 'end_time' query paramters are required for events requests.\")\n\n two_weeks = Client._date_format(datetime.datetime.utcnow() - datetime.timedelta(days=14), version, EVENTS)\n start = Client._date_format(kwargs[\"start_time\"], version, EVENTS)\n end = Client._date_format(kwargs[\"end_time\"], version, EVENTS)\n\n # less than --> earlier in time\n if start < two_weeks or end < two_weeks:\n raise ValueError(\"The 'start_time' and 'end_time' query parameters must be within two weeks from now.\")\n\n elif record_type == VEHICLES:\n # currently no vehicles specific param checks\n pass", "def get_event_by_timestamp(self, time: dt):\n # ensure that the given time uses the same timezone as the computer\n now = dt.now()\n time = time.astimezone(now.tzinfo)\n\n events = self.get_events()\n filtered_events = []\n # find the wanted event\n for e in events:\n event_start = next(v for k, v in e[\"start\"].items() if \"date\" in k)\n event_start = dt.fromisoformat(event_start).astimezone(now.tzinfo)\n\n event_end = next(v for k, v 
in e[\"end\"].items() if \"date\" in k)\n event_end = dt.fromisoformat(event_end).astimezone(now.tzinfo)\n\n # check if the given time is between the start and end of an event\n if time >= event_start and time <= event_end:\n filtered_events.append(e)\n return filtered_events", "def get_event(self, type):\n els = self.get_events(type)\n if els:\n if len(els) != 1:\n raise ValueError(\"There are %s events of type %s found -- \" \\\n \"expected at most one\" % (len(els), type))\n return els[0]", "def deserialize(\n self, inputs: Dict[str, Readable], options: Dict = None\n ) -> EventDataset:\n self.__validate_inputs(inputs)\n if not options:\n options = {}\n\n with performance_logging(\"load data\", logger=logger):\n raw_events = json.load(inputs[\"event_data\"])\n metadata = load_metadata(\n inputs[\"metadata\"], provider=Provider.METRICA\n )\n\n with performance_logging(\"parse data\", logger=logger):\n\n wanted_event_types = [\n EventType[event_type.upper()]\n for event_type in options.get(\"event_types\", [])\n ]\n\n events = []\n for i, raw_event in enumerate(raw_events[\"data\"]):\n\n if raw_event[\"team\"][\"id\"] == metadata.teams[0].team_id:\n team = metadata.teams[0]\n elif raw_event[\"team\"][\"id\"] == metadata.teams[1].team_id:\n team = metadata.teams[1]\n else:\n raise Exception(\n f\"Unknown team_id {raw_event['team']['id']}\"\n )\n\n player = team.get_player_by_id(raw_event[\"from\"][\"id\"])\n event_type = raw_event[\"type\"][\"id\"]\n subtypes = _parse_subtypes(raw_event)\n period = [\n period\n for period in metadata.periods\n if period.id == raw_event[\"period\"]\n ][0]\n previous_event = raw_events[\"data\"][i - 1]\n\n generic_event_kwargs = dict(\n # from DataRecord\n period=period,\n timestamp=raw_event[\"start\"][\"time\"],\n ball_owning_team=_parse_ball_owning_team(event_type, team),\n ball_state=BallState.ALIVE,\n # from Event\n event_id=None,\n team=team,\n player=player,\n coordinates=(_parse_coordinates(raw_event[\"start\"])),\n raw_event=raw_event,\n )\n\n iteration_events = []\n\n if event_type in MS_PASS_TYPES:\n pass_event_kwargs = _parse_pass(\n event=raw_event,\n previous_event=previous_event,\n subtypes=subtypes,\n team=team,\n )\n\n event = PassEvent.create(\n **pass_event_kwargs,\n **generic_event_kwargs,\n )\n\n elif event_type == MS_EVENT_TYPE_SHOT:\n shot_event_kwargs = _parse_shot(\n event=raw_event,\n previous_event=previous_event,\n subtypes=subtypes,\n )\n event = ShotEvent.create(\n **shot_event_kwargs,\n **generic_event_kwargs,\n )\n\n elif subtypes and MS_EVENT_TYPE_DRIBBLE in subtypes:\n take_on_event_kwargs = _parse_take_on(subtypes=subtypes)\n event = TakeOnEvent.create(\n qualifiers=None,\n **take_on_event_kwargs,\n **generic_event_kwargs,\n )\n\n elif event_type == MS_EVENT_TYPE_CARRY:\n carry_event_kwargs = _parse_carry(\n event=raw_event,\n )\n event = CarryEvent.create(\n qualifiers=None,\n **carry_event_kwargs,\n **generic_event_kwargs,\n )\n\n elif event_type == MS_EVENT_TYPE_RECOVERY:\n event = RecoveryEvent.create(\n result=None,\n qualifiers=None,\n **generic_event_kwargs,\n )\n\n elif event_type == MS_EVENT_TYPE_FOUL_COMMITTED:\n event = FoulCommittedEvent.create(\n result=None,\n qualifiers=None,\n **generic_event_kwargs,\n )\n\n else:\n event = GenericEvent.create(\n result=None,\n qualifiers=None,\n event_name=raw_event[\"type\"][\"name\"],\n **generic_event_kwargs,\n )\n\n if _include_event(event, wanted_event_types):\n events.append(event)\n\n # Checks if the event ended out of the field and adds a synthetic out event\n if 
event.result in OUT_EVENT_RESULTS:\n generic_event_kwargs[\"ball_state\"] = BallState.DEAD\n if raw_event[\"end\"][\"x\"]:\n generic_event_kwargs[\n \"coordinates\"\n ] = _parse_coordinates(raw_event[\"end\"])\n generic_event_kwargs[\"timestamp\"] = raw_event[\"end\"][\n \"time\"\n ]\n\n event = BallOutEvent.create(\n result=None,\n qualifiers=None,\n **generic_event_kwargs,\n )\n\n if _include_event(event, wanted_event_types):\n events.append(event)\n\n return EventDataset(\n metadata=metadata,\n records=events,\n )", "def meta_event(self, meta_type, data, time):", "def event_by_id(request, event_id):\n if request.method == 'GET':\n print 'get event by id'\n try:\n if event_id == '':\n raise ValueError('No ID is given while trying to get event by ID')\n\n event_get = Events.retrieve_by_id(event_id)\n if 'code' in event_get and event_get['code'] == 'items_not_found':\n raise ValueError('No event found with given id=' + event_id)\n\n event_response = {\n 'success': 'true',\n 'data': event_get\n }\n return JSONResponse(event_response)\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n return JSONResponse(err)\n\n elif request.method == 'PUT':\n print 'put update by event id'\n try:\n data = JSONParser().parse(request)\n data_checker.check_event(data, request.method)\n\n update_event_key = Events.update(event_id, data)\n\n if update_event_key:\n print 'create event successful'\n if 'user_id' not in data:\n resp = {\n 'success': 'true',\n 'data': update_event_key\n }\n else:\n all_events_list = Events.retrieve_all()\n resp_events = []\n for event in all_events_list:\n if event['user_id'] == data['user_id'] and event['status'] == data_checker.EVENT_UNREAD:\n resp_events.append(event)\n\n resp = {\n 'success': 'true',\n 'data': resp_events\n }\n else:\n raise RuntimeError('Orchestrate service temporarily unavailable')\n\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n return JSONResponse(err)\n\n return JSONResponse(resp)\n else:\n err = {\n \"success\": \"false\",\n \"err_message\": \"Only GET and PUT method is allowed\",\n \"data\": {}\n }\n return JSONResponse(err)", "def search_base(self, **kwargs):\n\n for (k, v) in kwargs.items():\n capit = ('name', 'surname', 'city')\n if k in capit:\n v = v.title()\n elif k == 'streetname':\n v = street_parser(v, '')[0]\n elif k == 'phone':\n v = phone_parser(v)[0]\n elif k == 'birthday':\n y, m, d = date_parser(v)\n v = dt.date(y, m, d)\n elif k in ['year', 'month', 'day']:\n v = int(v)\n self.sorting(k)\n # found items (None, a Person object or list of objects)\n found = search(self, v, k)\n return found" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log account events as INFO. Does nothing if the get events API is not implemented.
async def log_events(self) -> None:
    events = await self.dump_events()
    if events:
        self.client.logger.info("account(%s) events: %s", self.id, events)
[ "def info(self, *args: Any, **kwargs: Any) -> None:\n\n self.client.logger.info(*args, **kwargs)", "def cmd_info(self):\r\n self.log.setLevel(logging.INFO)\r\n self.log.info('Switching to INFO threshold')", "def on_account(self, account: AccountData) -> None:\n self.on_event(EVENT_ACCOUNT, account)\n # self.on_event(EVENT_ACCOUNT + account.vt_accountid, account)", "def info(*args, **kwargs):\n applogger.info(*args, **kwargs)\n pass", "def logEvent(self):\n\n curDate = conDateTimeToNum(datetime.now())\n\n msg = self.getBasicEventMsg()\n\n self.getEventQ().getHub().getLog().insertLogEntry(curDate,self.eventType,msg)", "def info(self,msg):\n self.logger.info(msg)", "def info(self, *messages):\n self.log(LOGLEVELS[\"info\"], \"\\n[Info]\", *messages)", "def log_event(event_type, request, extra_data=None, level=logging.INFO):\n event_dict = {\n \"event_type\": event_type,\n \"timestamp\": strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),\n \"ip_address\": request.META[\"REMOTE_ADDR\"],\n }\n user_identifier = _default_get_user_identifier(request)\n if user_identifier:\n event_dict[\"user\"] = user_identifier\n\n if extra_data:\n event_dict.update(extra_data)\n\n logger.log(level, f\"ZYGOAT: {json.dumps(event_dict)}\")", "def access_log(request, msg=None, level=None):\n if level is None:\n level = logging.INFO\n ai = AccessInfo(request)\n ai.log(level, msg)", "def log_events():\n print(\"Getting log events from database...\")\n response_message = api_controller.get_all_log_events()\n return jsonify(response_message)", "def info(self, msg, *args, **kwargs):\n self.write(msg, level='INFO', *args, **kwargs)", "def hook_Log(state, level, ea):\n DeepManticore(state).api_log(level, ea)", "def acc_status():\n print(\"\\nAccount Information\")\n for info in acc_info:\n print(info, \":\", acc_info[info])", "def update_logging(self):\n # custom loggers passed into tcex would not have log_info method\n if not hasattr(self.tcex.logger, 'log_info'):\n return\n\n if self._default_args.tc_log_level is None:\n # some Apps use logging while other us tc_log_level. ensure tc_log_level is always\n # available.\n self._default_args.tc_log_level = self._default_args.logging\n\n self.tcex.logger.log_info(self._default_args)\n\n # add api handler\n if self._default_args.tc_token is not None and self._default_args.tc_log_to_api:\n self.tcex.logger.add_api_handler(level=self.tcex.default_args.tc_log_level)\n\n # add rotating log handler\n self.tcex.logger.add_rotating_file_handler(\n name='rfh',\n filename=self._default_args.tc_log_file,\n path=self._default_args.tc_log_path,\n backup_count=self._default_args.tc_log_backup_count,\n max_bytes=self._default_args.tc_log_max_bytes,\n level=self.tcex.default_args.tc_log_level,\n )\n\n # replay cached log events\n self.tcex.logger.replay_cached_events(handler_name='cache')", "def log_info(self, fmt, *args, end=os.linesep): \n self.log(fmt, *args, levels='info', end=end)", "def info(self, message: str, **extra: t.Any):\n self.log(logging.INFO, message, extra)", "def log_event(self, event, request = None):\n self.get_mewlosite().logevent(event, request)", "def printEventInfo(self):\n\n print self.eventType + ' - ' + conDateNumToDateStr(self.numDate)", "def get_account_id(event):\n return event['account']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dump account events as a JSON-encoded string (well formatted, indent=2). Returns an empty string if the get events API is not implemented.
async def dump_events(self) -> str:
    try:
        return json.dumps(list(map(self.event_asdict, await self.events())), indent=2)
    except ClientError:
        return ""
[ "def to_str(self):\n import json\n from JsonEncoder import JSONEncoder\n\n self._add_meta()\n return json.dumps(self.events, cls=JSONEncoder)", "def get_events():\n\n #immplementation\n\n return json.dumps(events)", "def events(self):\n r = requests.get(self.uri+'events')\n r.raise_for_status()\n return r.json()", "def to_json(self):\n log_dict = dict(events_type=self.events_type,\n counter=len(self.events),\n events=[LogEvents._event_to_dict_ts(e) for e in self.events]\n )\n return json.dumps(log_dict)", "async def log_events(self) -> None:\n\n events = await self.dump_events()\n if events:\n self.client.logger.info(\"account(%s) events: %s\", self.id, events)", "def get_all_events():\n\n events = Event.query.all() # list of objs\n\n events_list = []\n\n for event in events:\n events_list.append(as_dict(event))\n\n return jsonify(events_list)", "def log_events():\n print(\"Getting log events from database...\")\n response_message = api_controller.get_all_log_events()\n return jsonify(response_message)", "def get_events(data):\n\n return data[\"events\"]", "def get_all_events(request):\n events = Event.objects.all()\n data = serializers.serialize(\"json\", events)\n return HttpResponse(data, content_type=\"application/json\")", "def get_events():\n req = request\n start_date = request.args.get(\"start_date\")\n end_date = request.args.get(\"end_date\")\n desc = request.args.get(\"event_desc\")\n sqlx, sqlx_count = DBAccess.bld_query_sql(start_date, end_date, desc)\n \n list_result = DBAccess.get_events(sqlx, sqlx_count)\n if list_result[0] == 'error':\n sj = jsonify({\"events_error\": list_result[1]})\n else:\n sj = jsonify({\"events_details\": list_result[1]})\n return sj", "def handle_evr_get():\n return json.dumps(evr.getDefaultDict().toJSON())", "def _event_serialize(self, event, **kwargs):\n return SchemaParser.serialize_event(event)", "def all_event_types():\n\n event_types = Event_Type.query.all()\n\n return jsonify([event_type.serialize() for event_type in event_types])", "def to_ics_event_string(self) -> str:\n data = self.to_event_dict()\n start = (\n data[\"start\"][\"dateTime\"].replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\")\n )\n end = data[\"end\"][\"dateTime\"].replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\")\n return (\n \"BEGIN:VEVENT\\n\"\n + f'DTSTAMP:{datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")}Z\\n'\n + f'DTSTART;TZID={data[\"start\"][\"timeZone\"]}:{start}\\n'\n + f'DTEND;TZID={data[\"end\"][\"timeZone\"]}:{end}\\n'\n + f'RRULE:{data[\"recurrence\"][0][6:]}\\n'\n + f\"UID:{uuid.uuid4()}\\n\"\n + f'DESCRIPTION:{data[\"description\"]}\\n'\n + f'LOCATION:{data[\"location\"]}\\n'\n + \"SEQUENCE:0\\n\"\n + \"STATUS:CONFIRMED\\n\"\n + f'SUMMARY:{data[\"summary\"]}\\n'\n + \"TRANSP:OPAQUE\\n\"\n + \"END:VEVENT\\n\"\n )", "def dump_tournament(tournament, event):\r\n ## Get tournament name and date\r\n tournament_url = \"https://api.smash.gg/tournament/\" + tournament\r\n t = requests.get(tournament_url)\r\n tournament_data = t.json()\r\n tournament_name = tournament_data[\"entities\"][\"tournament\"][\"name\"]\r\n timezone = tournament_data[\"entities\"][\"tournament\"][\"timezone\"]\r\n if not timezone:\r\n timezone = \"UTC\"\r\n\r\n # Scrape event page in case event ends earlier than tournament\r\n event_url = \"https://api.smash.gg/tournament/\" + tournament + \"/event/\" + event + \"-singles\"\r\n e = requests.get(event_url)\r\n event_data = e.json()\r\n event_id = event_data[\"entities\"][\"event\"][\"id\"]\r\n\r\n timestamp = 
event_data[\"entities\"][\"event\"][\"endAt\"]\r\n if not timestamp:\r\n timestamp = tournament_data[\"entities\"][\"tournament\"][\"endAt\"]\r\n\r\n # Get local date\r\n date = datetime.fromtimestamp(timestamp, pytz.timezone(timezone)).date()\r\n\r\n ## Get standings\r\n standing_string = \"/standings?expand[]=attendee&per_page=100\"\r\n standing_url = event_url + standing_string\r\n s = requests.get(standing_url)\r\n s_data = s.json()\r\n count = s_data[\"total_count\"]\r\n print(\"Total entrants:\", count)\r\n\r\n # API limits requests to 100 at a time, so we need to request multiple pages\r\n pages = int(math.ceil(count/100.0))\r\n print(\"Pages: \", pages)\r\n\r\n attendees_dict = []\r\n\r\n while len(attendees_dict) < count:\r\n for i in range(pages):\r\n page = i + 1\r\n if page != 1:\r\n standing_url = event_url + standing_string + \"&page=\" + str(page)\r\n s = requests.get(standing_url)\r\n s_data = s.json()\r\n\r\n players = s_data[\"items\"][\"entities\"][\"attendee\"]\r\n\r\n # Find each player's placement in the given game\r\n for player in range(len(players)):\r\n smashgg_id = players[player][\"playerId\"]\r\n name = players[player][\"player\"][\"gamerTag\"]\r\n print(\"Name: \" + name)\r\n entered_events = players[player][\"entrants\"]\r\n for event in range(len(entered_events)):\r\n if entered_events[event][\"eventId\"] == event_id:\r\n attendees_dict.append({\"name\": name,\r\n \"place\": entered_events[event][\"finalPlacement\"],\r\n \"smashgg_id\": smashgg_id})\r\n print(\"Len: \" + str(len(attendees_dict)))\r\n\r\n tournament_dict = {\"name\": tournament_name,\r\n \"game\": event,\r\n \"date\": str(date),\r\n \"url\": event_url}\r\n return tournament_dict, attendees_dict", "def describe_events(self):\n return self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_stack_events\",\n kwargs={\"StackName\": self.stack.external_name},\n )", "def getUserEvents(self):\n return self.base.get(\"user_events\", [])", "def get_events(self):\n return self.events", "def get_events(self):\n key = (self.user_name, self.bucket_name, \"event\")\n data = yield get_relation(key)\n returnValue(dict([(data[i], {\"id\":i}) for i in data]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `Event` as a dictionary object. As we use a JSON-encoded string field, this function tries to decode any JSON-encoded string into a dictionary so that event data prints nicely in the log.
def event_asdict(self, event: Event) -> Dict[str, Any]:
    ret = asdict(event)
    try:
        ret["data"] = json.loads(event.data)
    except json.decoder.JSONDecodeError:
        pass
    return ret
[ "def __decode(self, message):\n message = message.decode(\"UTF-8\")\n try:\n data = json.loads(message)\n except ValueError:\n data = None\n\n if type(data) is dict and 'event' in data:\n return data['event']\n\n return None", "def from_json(cls, event):\n # type: (Any) -> EventGridEvent\n dict_event = _get_json_content(event)\n return cast(EventGridEvent, EventGridEvent.from_dict(dict_event))", "def get_events():\n\n #immplementation\n\n return json.dumps(events)", "def _event_serialize(self, event, **kwargs):\n return SchemaParser.serialize_event(event)", "def fromjson(cls, json_string: str) -> \"Event\":\n obj = json.loads(json_string)\n return cls(UUID(obj[\"event_id\"]), obj[\"event_type\"], obj[\"schema_name\"], obj[\"table_name\"], obj[\"row_id\"])", "def to_str(self):\n import json\n from JsonEncoder import JSONEncoder\n\n self._add_meta()\n return json.dumps(self.events, cls=JSONEncoder)", "def read_json(s):\n def str2num(d):\n if isinstance(d, OrderedDict):\n timestamp_format = '%Y-%m-%d %H:%M:%S'\n for k, v in d.items():\n if re.match('id*', k): d[k] = int(v)\n if k == 'amount': d[k] = float(v)\n if k == 'T' or k == 'D': d[k] = int(v)\n if k == 'timestamp':\n d[k] = get_timestamp(datetime.strptime(d[k], timestamp_format))\n d['time_in'] = get_timestamp(datetime.now())\n return d\n\n event = json.loads(s, object_pairs_hook=OrderedDict)\n return str2num(event)", "def ConvertEventToRow(self, event):\n if not event.get('__process__', False):\n return None\n\n row = {}\n\n row['uuid'] = event.get('uuid')\n row['time'] = event.get('time')\n row['startTime'] = event.get('startTime')\n row['decompressEndTime'] = event.get('decompressEndTime')\n row['endTime'] = event.get('endTime')\n row['duration'] = event.get('duration')\n row['status'] = []\n for code in event.get('status', []):\n row['status'].append({})\n row['status'][-1]['code'] = code\n row['messages'] = []\n for message in event.get('message', []):\n row['messages'].append({})\n row['messages'][-1]['message'] = message\n\n return json.dumps(row, allow_nan=False)", "def getEventDict(self):\n return self._eventDict", "def to_dict(self):\n log_dict = dict(events_type=self.events_type,\n counter=len(self.events),\n events=[LogEvents._event_to_dict_dt(e) for e in self.events]\n )\n return log_dict", "def to_json(self):\n log_dict = dict(events_type=self.events_type,\n counter=len(self.events),\n events=[LogEvents._event_to_dict_ts(e) for e in self.events]\n )\n return json.dumps(log_dict)", "def parse_event(self, event):", "def test_db_event_serialization(self):\n\n # Construct a json representation of a DbEvent model\n db_event_model_json = {}\n db_event_model_json['account'] = 'testString'\n db_event_model_json['db_name'] = 'testString'\n db_event_model_json['seq'] = 'testString'\n db_event_model_json['type'] = 'created'\n\n # Construct a model instance of DbEvent by calling from_dict on the json representation\n db_event_model = DbEvent.from_dict(db_event_model_json)\n assert db_event_model != False\n\n # Construct a model instance of DbEvent by calling from_dict on the json representation\n db_event_model_dict = DbEvent.from_dict(db_event_model_json).__dict__\n db_event_model2 = DbEvent(**db_event_model_dict)\n\n # Verify the model instances are equivalent\n assert db_event_model == db_event_model2\n\n # Convert model instance back to dict and verify no loss of data\n db_event_model_json2 = db_event_model.to_dict()\n assert db_event_model_json2 == db_event_model_json", "def parseEvent(primitive):\n event={}\n event = 
OrderedDict()\n for dat in primitive:\n # print dat\n if \":\" in dat:\n # Add entry to dictionary\n event[dat.split(\":\")[0]] = \":\".join(dat.split(\":\")[1:])\n elif len(event.keys())>0:\n # Try adding to previous entry\n event[event.keys()[-1]]\n return event", "def decode(self, obj):\n if isinstance(obj, EventJSONObject):\n evt = self.EventDecoder.decode(obj.evt)\n particles = [self.ParticleDecoder.decode(p) for p in obj.particles]\n for p in particles:\n p.evt = evt\n vertices = [self.VertexDecoder.decode(v) for v in obj.vertices]\n for v in vertices:\n v.evt = evt\n evt.particles = {p.barcode:p for p in particles}\n evt.vertices = {v.barcode:v for v in vertices}\n return evt\n \n objType = json.JSONDecoder().decode(obj).get(\"type\", None)\n if objType==\"particle\":\n return self.ParticleDecoder.decode(obj)\n elif objType==\"vertex\":\n return self.VertexDecoder.decode(obj)\n else:\n raise ValueError", "def test_parse_payload_from_event(self):\n event_payload = {\n 'created': 1326853478,\n 'livemode': False,\n 'id': 'evt_000',\n 'type': 'invoice.created',\n 'object': 'event',\n 'request': None,\n 'pending_webhooks': 1,\n 'api_version': '2020-03-02',\n 'data': {\n 'object': {\n 'date': 1433018770,\n 'id': 'in_000',\n 'period_start': 1433018770,\n 'period_end': 1433018770,\n 'lines': {\n 'data': [\n {\n 'id': 'sub_000',\n 'object': 'line_item',\n 'type': 'subscription',\n 'livemode': True,\n 'amount': 0,\n 'currency': 'usd',\n 'proration': False,\n 'period': {\n 'start': 1433162255,\n 'end': 1434371855\n },\n 'subscription': None,\n 'quantity': 1,\n 'plan': {\n 'interval': 'month',\n 'name': 'Gold',\n 'created': 1424879591,\n 'amount': 500,\n 'currency': 'usd',\n 'id': 'gold',\n 'object': 'plan',\n 'livemode': False,\n 'interval_count': 1,\n 'trial_period_days': 14,\n 'metadata': {},\n 'statement_descriptor': 'GOLD MONTHLY'\n },\n 'description': None,\n 'discountable': True,\n 'metadata': {}\n }\n ],\n 'total_count': 1,\n 'object': 'list',\n 'url': '/v1/invoices/in_000/lines'\n },\n 'subtotal': 0,\n 'total': 500,\n 'customer': 'cus_000',\n 'object': 'invoice',\n 'attempted': False,\n 'closed': True,\n 'forgiven': False,\n 'paid': True,\n 'livemode': False,\n 'attempt_count': 0,\n 'amount_due': 0,\n 'currency': 'usd',\n 'starting_balance': 0,\n 'ending_balance': 0,\n 'next_payment_attempt': None,\n 'webhooks_delivered_at': None,\n 'charge': None,\n 'discount': None,\n 'application_fee': None,\n 'subscription': 'sub_000',\n 'tax_percent': None,\n 'tax': None,\n 'metadata': {},\n 'statement_descriptor': None,\n 'description': None,\n 'receipt_number': '0009000'\n }\n }\n }\n\n parsed_payload = Invoice.parse_from_event(event_payload)\n\n assert parsed_payload['payment_id'] == 'cus_000'\n assert parsed_payload['plan'] == 'Gold'\n assert parsed_payload['receipt_number'] == '0009000'\n assert parsed_payload['description'] == 'GOLD MONTHLY'\n assert parsed_payload['period_start_on'] == datetime.date(2015, 6, 1)\n assert parsed_payload['period_end_on'] == datetime.date(2015, 6, 15)\n assert parsed_payload['currency'] == 'usd'\n assert parsed_payload['tax'] is None\n assert parsed_payload['tax_percent'] is None\n assert parsed_payload['total'] == 500", "def get_events(data):\n\n return data[\"events\"]", "async def dump_events(self) -> str:\n\n try:\n return json.dumps(list(map(self.event_asdict, await self.events())), indent=2)\n except ClientError:\n return \"\"", "def parse_log_file(self, log_file):\n msg = {}\n events = {}\n print \"Parsing %s ...\" % log_file\n for line in open(log_file, 
'r'):\n log_entry = [entry.strip() for entry in line.split(\"|\")]\n log_time = parse_datetime(log_entry[0])\n if log_entry[3].find(\"Event: \") != -1:\n event_dict_string = log_entry[3][len(\"Event: \"):]\n event_dict = ast.literal_eval(event_dict_string)\n events[log_time] = event_dict\n else:\n msg[log_time] = log_entry[3]\n return (msg, events)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log info to `client.logger`
def info(self, *args: Any, **kwargs: Any) -> None:
    self.client.logger.info(*args, **kwargs)
[ "def info(self,msg):\n self.logger.info(msg)", "def log() -> None:\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger('discord')\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s'))\n logger.addHandler(handler)", "def log(self, msg):\n self.logger.write(msg)", "def logger_info(self,text):\n logging.info(self.log_my_name()+' '+text)", "def info(*args, **kwargs):\n applogger.info(*args, **kwargs)\n pass", "def log(self) -> None:\n # Otherwise carry on...\n if self.v_level == 3:\n self.logger.info(self)\n elif self.v_level == 4:\n self.logger.debug(self)", "def hutil_log_info(hutil, message):\n if hutil is not None:\n hutil.log(message)\n else:\n print('Info: {0}'.format(message))", "def log_info(self, fmt, *args, end=os.linesep): \n self.log(fmt, *args, levels='info', end=end)", "def info(self, *messages):\n self.log(LOGLEVELS[\"info\"], \"\\n[Info]\", *messages)", "def access_log(request, msg=None, level=None):\n if level is None:\n level = logging.INFO\n ai = AccessInfo(request)\n ai.log(level, msg)", "def info(self, message, tenant=None):\n self.logger.info(message, extra={'tenant': tenant})", "def info(self, message: str, **extra: t.Any):\n self.log(logging.INFO, message, extra)", "def debug(self,msg):\n self.logger.debug(msg)", "def start_logger(self):\n # Logger just Works for Python3, this will be updated \n self.logger = logging.getLogger('Otto-CT-v0.0.1.beta') # Change logger\n self.logger.info('Otto Logger is been activated.')", "def hook_Log(state, level, ea):\n DeepManticore(state).api_log(level, ea)", "def logger_debug(self,text):\n logging.debug(self.log_my_name()+' '+text)", "def setup_logging():\n client = logging.Client()\n client.get_default_handler()\n client.setup_logging()", "def info(self, msg, *args, **kwargs):\n self.write(msg, level='INFO', *args, **kwargs)", "def log(self, msg):\n self.logs.append(str(msg))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the player's choice of token.
def get_player_token_choice():
    # This function should make use of raw_input to ask the player what
    # token they'd like to use. The only valid tokens are 'x' and 'o', so
    # make sure to handle other inputs gracefully.
    while True:
        tokenchoice = raw_input('Which icon would you like to use? Enter "x" or "o" and press enter: ')
        if tokenchoice == 'x' or tokenchoice == 'X':
            print('Ok you are playing with "x"')
            return 'x'
        elif tokenchoice == 'o' or tokenchoice == 'O':
            print('Ok you are playing with "o"')
            return 'o'
        else:
            print("I'm sorry. I don't understand that. Please try again\n")
[ "def choice(self):\n return self.__choice", "def player_choose(self) -> None:\n print(\"(1) Rock\\n(2) Paper\\n(3) Scissors\")\n self.human_choice = OPTIONS[int(input(\"Enter the number of your choice: \")) - 1]", "def get_user_choice():\n\n return input('Your choice: ')", "def playerSelection(player):\n print('\\nIs player {} a human or computer?'.format(player))\n print('1. Enter 1 if Human')\n print('2. Enter 2 if Computer')\n\n return makeChoice()", "def get_opponent_option(self) -> None:\n while True:\n is_bot = input(\"Play with bot ? (yes or no): \")\n try:\n if is_bot == \"yes\":\n return BOT\n elif is_bot == \"no\":\n return HUMAN\n else:\n raise Exception(\"Invalid Input\")\n except BaseException:\n print(\"Invalid Input\")", "def get_choice(self, preference):\n if preference < len(self.choices):\n return self.choices[preference]\n else:\n return \"\"", "def player_mode():\n try:\n print(\"\\n Choose an option:\")\n print(\"\\t 1.One player\")\n print(\"\\t 2.Two players\")\n option = input(\" >your option: \")\n option = validate_mode(option, \"player_mode\")\n if option == 1:\n return \"one_player\"\n elif option == 2:\n return \"two_players\"\n except InvalidInputMode as err:\n print(err)", "def get_system_choice():\n return random.choice(GAME_CHOICES)", "def get_pick_prompt(cls, ctx: RoleActionContext) -> Message:\n return messages.PICK_PROMPT[cls.name]", "def _read_token(self):\n return self.token", "def choice_value(self) -> str:\n return self.value[0]", "def get_choice_value(cls, choice: str) -> str:\n return cls[choice].value[0]", "def choose_character(self):\n self.transition(7)\n print(\"\"\"Avant de commencer ton aventure, qui veux tu incarner ?\n- Un guerrier fort et solide comme la pierre\n- Un archer agile et souple comme le vent\n- Un magicien intelligent et rusé comme le corbeau\"\"\")\n while True:\n try:\n player_choice = input('Je veux incarner un : ').lower()\n # Check if player_choice is in the roles class attribut\n player_class = Narrator.roles[player_choice]\n break\n except:\n print('Je ne reconnais pas ce personnage')\n return player_class", "def findChoice(self, choice: 'SoNode') -> \"int\":\n return _coin.SoVRMLSwitch_findChoice(self, choice)", "def get_token(self):\n return self._do_token", "def get_current_player(player_one_turn: bool) -> str:\r\n\r\n # Complete this function.\r\n if player_one_turn == True:\r\n return P1\r\n else:\r\n return P2", "def _get_select_question_input(): # pragma: no cover\n questions = [\n inquirer.List('answer',\n message='Do you wanna select this paper?',\n choices=[\n 'Skip', \n 'No', \n 'Yes', \n 'Save what I\\'ve done so far and leave'],\n ),\n ]\n return inquirer.prompt(questions).get('answer')", "def first_participant_choice():\n return random.randint(0, 2)", "def input_menu_choice():\n choice = str(input('Which option would you like to perform? [1 to 3] - ')).strip()\n print()\n return choice" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asks the player where they want to place their token and returns that answer.
def get_player_move(board, player_token):
    # Make use of the raw_input to ask the user a question. Make sure only
    # valid inputs work (use is_space_free function). The question should be
    # asked until the player gives a correct place for their token (a while
    # loop can help do that).
[ "def get_player_token_choice():\n\n # This function should make use of raw_input to ask the player what\n # token they'd like to use. The only valid tokens are 'x' and 'o', so\n # make sure to handle other inputs gracefully.\n while True :\n tokenchoice = raw_input('Which icon would you like to use? Enter \"x\" or \"o\" and press enter: ')\n if tokenchoice == 'x' or tokenchoice == 'X':\n print('Ok you are playing with \"x\"')\n return 'x'\n elif tokenchoice == 'o' or tokenchoice =='O':\n print('Ok you are playing with \"o\"')\n return 'o'\n else:\n print(\"I'm sorry. I don't understand that. Please try again\\n\")", "async def ask_bet(self, ctx):\r\n await ctx.send(\"What is the big blind (minimum bet) amount?\")", "async def magic8ball(ctx, question: str):\n answer = random.choice(strings['eight_ball'])\n reply = f\"{ctx.author.mention}, the magic 8 ball has spoken: \\\"{answer}\\\".\"\n await ctx.send(reply)\n return", "def hardversion() -> None:\n print(f\"Welcome {player}! Let's begin.\")\n global points\n q1: str = input(\"What is the name of the restaurant Monica was head chef at? \")\n if q1 == \"Alessandro's\":\n points = points + 1\n print(f\"Current points: {points}\")\n q2: str = input(\"What book did Chandler buy for Kathy? \")\n if q2 == \"The Velveteen Rabbit\":\n points = points + 1\n print(f\"Current points: {points}\")\n q3: str = input(\"Where did Pheobe's love interest, David, go to work? \")\n if q3 == \"Minsk\":\n points = points + 1\n print(f\"Current points: {points}\")\n print(\"Finally, a bonus question! This question is worth 2 points.\")\n randgen()", "def question(self, question):\n if self.use_STT:\n self.say(question)\n response = STT.wait_for_voice()\n else:\n naoqiutils.speak(question)\n response = raw_input(question + \"\\n> \")\n return response", "def ask_question(question) :\n logger.info('Asking wolframalpha.')\n try :\n cprint('Hmm..Thinking....','yellow')\n api_key = 'GLHKQ7-R5V9E6GU3Y'\n client = wolframalpha.Client(api_key)\n res = client.query(question)\n answer = next(res.results).text\n if 'Wolfram|Alpha' in answer:\n answer = answer.replace('Wolfram|Alpha',bot['name'])\n if 'no data available' in answer:\n answer = wiki_search(question,1) \n # search_google(question)\n return answer\n except :\n logger.info('Wolframalpha do not know the answer.')\n answer = wiki_search(question,1)\n logger.info(answer)\n # search_google(question)\n # answer = 'check browser.'\n return answer", "def choose_place_name_to_put_token(self):\n place_option = []\n for hunted in self.player.game.hunted:\n for card in hunted.played:\n if card.name not in place_option:\n place_option.append(card.name)\n for card in hunted.phand:\n if card.name not in place_option:\n place_option.append(card.name)\n return random.choice(place_option)", "def _ask_for_position(self) -> int:\n while True:\n pos_rep = input('Player {}, where will you play? 
'\n .format('1 (X)' if self._first_player_active\n else '2 (O)'))\n if self._is_valid(pos_rep):\n return int(pos_rep)\n else:\n print('Invalid input!')", "def ask(self, question):\n\n # Set the current question\n self.data['stimulus'] = question\n\n # Connect to Cleverbot and remember the response\n resp = self._send()\n\n # Add the current question to the conversation log\n self.conversation.append(question)\n\n parsed = self._parse(resp.text)\n\n # Set data as appropriate\n if self.data['sessionid'] != '':\n self.data['sessionid'] = parsed['conversation_id']\n\n # Add Cleverbot's reply to the conversation log\n self.conversation.append(parsed['answer'])\n\n return parsed['answer'].encode('latin-1').decode('utf-8')", "def vqa_prompt(self, question, answer=None) -> str:", "def choose_place_name_to_put_token(self):\n prob = collections.Counter()\n for hunted in self.player.game.hunted:\n possible_places = hunted.phand + hunted.played\n for card in possible_places:\n prob[card.name] += (1 / len(possible_places))\n total_prob_denominator = 0\n\n for cardname in prob:\n total_prob_denominator += prob[card]\n\n return random.choices(list(prob.keys()), weights=prob.values())[0]", "def get_word():\n\treturn raw_input('What word have you found?')", "def ask_for_player():\n question = [\n {\n 'type': 'input',\n 'name': 'player_name',\n 'message': 'Enter the player\\'s name',\n 'validate': PlayerValidator,\n }\n ]\n answer = prompt(question, style=style)\n return answer", "def askPlayer() -> None:\r\n print(\"Which symbol do you take?\")\r\n for data in Symbol:\r\n print(\"{}. {}\".format(data.value, data.name))", "def game_code():\n make_global()\n \n validLetters= \"abcdefghijklmnopqrstuvwxyz\"\n \n turns = 10\n \n guessed = \"\"\n \n \n while turns > 0:\n msg = \"\"\n missed = 0\n print(secret)\n for letter in secret:\n if letter in guessed:\n msg = msg + letter\n else:\n msg = msg + \"_\" + \" \"\n missed += 1\n if msg == secret:\n print(msg)\n print(\"YOU ARE CORRECT, THE WORD WAS: \", secret)\n break\n print(msg)\n guess = raw_input(\"GUESS THE WORD: \")\n \n if guess in validLetters:\n guessed = guessed + guess\n print(\"YOU HAVE \", turns, \"LEFT!\")\n elif guess == \"hint\":\n print(hints[hint_number])\n elif guess == \"math\":\n math_problems = [\"10 * 14\", \"(76+79)^(45646-((5705*8)+6)\", \n \"50*(58818614681861861818-58818614681861861817)\"]\n math_ans = [\"140\", \"1\", \"50\"]\n math = random.choice(math_problems)\n ans_num = math_problems.index(math)\n print(math)\n guess_num = raw_input(\"Please solve the problem:\")\n if guess_num == math_ans[ans_num]:\n print(\"Congratulation, you solved the problem! You get two\", \n \" more guesses!\")\n turns += 2\n print(\"YOU HAVE\", turns, \"LEFT!\")\n else:\n print(\"You lost two guesses!\")\n turns -= 2\n print(\"YOU HAVE\", turns, \" GUESSES LEFT!\")\n game_code()\n elif guess == \"break\":\n return \"Thank you for playing!!\"\n elif guess.lower() == secret.lower():\n print(\"CONGRATULATION, YOU ACTUALLY GOT THE ANSWER CORRECT! 
\",\n \"YOU CAN GET A 1600 IN THE SAT!\"\n )\n restart_button()\n else:\n print(\"ENTER A VALID LETTER\")\n print(msg)\n guess = raw_input(\"GUESS THE WORD: \")\n \n if guess not in secret: #Code that simulates ASKER for the game\n turns -= 1\n if turns == 9:\n print(\" o\")\n if turns == 8:\n print(\" o\")\n print(\" |\")\n if turns == 7:\n print(\" o\")\n print(\" |\")\n print(\" \\ \")\n if turns == 6:\n print(\" o\")\n print(\" |\")\n print(\" / \")\n if turns == 5:\n print(\" o\")\n print(\" |\")\n print(\" / \\ \")\n if turns == 4:\n print(\" o\")\n print(\" |\")\n print(\"_/ \\_ \")\n if turns == 3:\n print(\" o\")\n print(\" |-\")\n print(\"_/ \\_ \")\n if turns == 2:\n print(\" o\")\n print(\" -|-\")\n print(\"_/ \\_ \")\n if turns == 1:\n print(\"YOU HAVE FAILED TO GUESS THE WORD:\", secret)\n print(\"YOU WILL NEVER GET 100% WITHOUT A CURVE!\")\n print(\"YOU DIED FROM STRESS, LEADING THERE TO BE NO RESTART\")\n time.sleep(1)\n turns -= 2\n restart_button() #Calling the function below\n def hangman_display():\n \"\"\"\n Function packages code using no functions and will simulate a game to \n check if a letter is in the secret and showing the display_word variable\n to display the word snd the numbe of letters in the secret code to \n create a hangman game code. \n \"\"\"\n display = \"\"\n \n for char in secret:\n if char in guess:\n display += char\n elif char == \" \":\n display += char\n else:\n display += \"-\"", "async def reward():\n reward_chimes = random.randrange(1, 4)\n results = ''\n sorted_correct = sorted(correct_guessing_people.items(), key=lambda x: x[1], reverse=True)\n if len(sorted_correct) < 1:\n return await embeds.title_and_desc(msg.channel,\n '- Trivia Game Results -',\n 'Nobody guessed anything. That\\'s... interesting.',\n discord.Color.gold())\n for index, pepes_friend in enumerate(sorted_correct):\n print(index)\n if index == 0:\n results += f'**{bot.client.get_user(int(pepes_friend[0])).mention}** won with **{pepes_friend[1]}** ' \\\n f'Points and received **{reward_chimes} Chime{\"s\" if reward_chimes > 1 else \"\"}**' \\\n f' for it! 
:confetti_ball: :sparkler:\\n'\n data.modify_currency_of_user(msg.guild.id, bot.client.get_user(int(pepes_friend[0])), reward_chimes)\n else:\n results += f'**#{index + 1}**: {bot.client.get_user(int(pepes_friend[0])).mention} ' \\\n f'with {pepes_friend[1]} points!\\n'\n\n return await embeds.title_and_desc(msg.channel, '- Trivia Game Results -', results, discord.Color.gold())", "def place_token(self, token, verbose=False):\n chosen_place_name = self.mind.choose_place_name_to_put_token()\n for place_card in self.game.board:\n if place_card.name == chosen_place_name:\n token.place = place_card\n if verbose:\n logger.info('{} puts the {} token on {}'.format(self.name,\n token.name,\n place_card.name))\n break", "def otp_token_prompt(self, uri, token_method, *args, **kwargs):\n if getattr(self.options, 'diff_filename', None) == '-':\n raise CommandError('A two-factor authentication token is '\n 'required, but cannot be used with '\n '--diff-filename=-')\n\n print()\n print('Please enter your two-factor authentication token for Review '\n 'Board.')\n\n if token_method == 'sms':\n print('You should be getting a text message with '\n 'an authentication token.')\n print('Enter the token below.')\n elif token_method == 'call':\n print('You should be getting an automated phone call with '\n 'an authentication token.')\n print('Enter the token below.')\n elif token_method == 'generator':\n print('Enter the token shown on your token generator app below.')\n\n print()\n\n return get_pass('Token: ', require=True)", "def question():\n input('Ask your question and press the [Enter] button.')\n answer = response()\n print('\\nAsking the spirits...')\n for thought in range(3):\n print('.', end='')\n time.sleep(1)\n print(\"\\n{}\\n\".format(answer))\n replay()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True or False depending on whether the board is full.
def is_board_full(board):
    # Review the board and check if it is full.
[ "def isFull(board):\n pass", "def is_full(self):\r\n for row in range(BOARD_ROWS):\r\n for col in range(BOARD_COLUMNS):\r\n if self.__board[row][col] == EMPTY:\r\n return False\r\n return True", "def is_board_full():\n num_pieces = np.sum(np.abs(board))\n print(\"Number of pieces in board {}\".format(num_pieces))\n if num_pieces == 9:\n return True\n return False", "def is_full(self):\n for i in xrange(self.start, self.board_end + 1):\n if not self.pos_full(i):\n return False\n return True", "def isBoardFilled(self):\n for i in range(1, 10):\n if self.isEmpty(i):\n return False\n return True", "def is_filled(self)->bool:\n for row in self.__board:\n for column in row:\n if column==EMPTY_SPACE:\n return False\n \n return True", "def check_boardsize():\n return BOARD_SIZE % 2 == 0", "def _is_full(self):\n\t\t # the number of round can't be superior to the number of case of the grid\n\t\treturn self._round > self.width * self.height", "def is_full(self) -> bool:\n return self.get_size() >= self.size", "def is_not_full(self):\n if len(self.players) >= self.max_players:\n return False\n\n return True", "def enemyOnBoard():\n zCount, fZCount, bCount, mCount, pCount = self.count_pieces()\n return zCount > 0 or fZCount > 0 or bCount > 0 or mCount > 0", "def is_game_full(self):\n if self.get_player_count() < self.PLAYER_MAX:\n return False\n else:\n return True", "def is_empty(self, row: int, col: int) -> bool:\n return self.board[row, col] == 0", "def isFull(self) -> bool:\n return len(self.queue) == self.size", "def isFull(self): \n maxCapacity = self.numNodes() * (self.numNodes() - 1)\n return self.numEdges() == maxCapacity", "def check_for_full_board():\r\n\r\n for cell in board:\r\n if 'Empty' in cell.content:\r\n return\r\n\r\n while True:\r\n\r\n for event in pygame.event.get():\r\n if event.type == KEYDOWN:\r\n if event.key == K_y:\r\n main()\r\n elif event.key == K_n:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n GAME.fill(WHITE)\r\n SURF.fill(WHITE)\r\n draw_grid()\r\n draw_cells()\r\n\r\n winnerSurface = main_font.render(\"It's a tie !\", True, BLACK)\r\n winnerRect = winnerSurface.get_rect()\r\n winnerRect.center = SIZE // 2, GAME_y // 2\r\n SURF.blit(winnerSurface, winnerRect)\r\n\r\n againSurface = side_font.render('Play again ? (Y / N)', True, BLACK)\r\n againRect = againSurface.get_rect()\r\n againRect.center = SIZE // 2, SIZE - GAME_y // 2\r\n SURF.blit(againSurface, againRect)\r\n\r\n SURF.blit(GAME, (GAME_x, GAME_y))\r\n\r\n pygame.display.update()\r\n FPSclock.tick(FPS)\r\n\r\n else:\r\n return", "def game_draw(self):\n\t\tfor num in np.ravel(self.boardStatus):\n\t\t\tif num == self.type[\"blank\"]:\n\t\t\t\treturn False\n\t\tif self.game_won() != self.type[\"blank\"]:\n\t\t\treturn False\n\t\treturn True", "def is_full(self) -> bool:\n if self.nb_workers == 0:\n return False\n\n for i in range(self.nb_workers):\n queue = self.worker_input_queues[self.worker_control]\n if not queue.full():\n return False\n self.worker_control = (self.worker_control + 1) % self.nb_workers\n\n return True", "def test_board_filled():\r\n gc = GameController()\r\n board = Board(600, 600, 4, gc, WHITE, BLACK)\r\n assert board.board_filled() is False\r\n for i in range(board.SIZE):\r\n for j in range(board.SIZE):\r\n board.place_tile(i, j, board.BLACK)\r\n assert board.board_filled() is True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the function to create an edifact section representing the beginning of a message
def test_create_message_beginning(self):
    with self.subTest("Message beginning for a death registration"):
        expected = MessageBeginning(party_id="XX1", date_time="201904230900", ref_number="G5").segments
        op_def = fixtures.create_operation_definition_for_death_registration()
        message_adaptor = MessageDeathAdaptor(fhir_operation=op_def)
        msg_bgn = message_adaptor.create_message_beginning().segments
        compare(msg_bgn, expected)
[ "def create_message_segment_beginning(message_beginning_dict: EdifactDict) -> MessageSegmentBeginningDetails:\r\n reference_segment = get_value_in_dict(dict_to_search=message_beginning_dict, key_to_find=\"RFF\")\r\n reference_values = reference_segment.split(SUB_SECTION_SEPARATOR)\r\n reference_number = reference_values[1]\r\n return MessageSegmentBeginningDetails(reference_number)", "def setUp(self):\n self.message = Header()\n self.message.message_type = Type.OFPT_HELLO\n self.message.xid = 1\n self.message.length = 0", "def test_empty_header(self):\n text = ('text\\n\\n'\n '== ==\\n'\n '=====\\n'\n '=== ===\\n')\n result = extract_sections(text, self.site)\n self._extract_sections_tests(\n result,\n 'text\\n\\n',\n [('== ==', '\\n'), ('=====', '\\n'), ('=== ===', '\\n')]\n )", "def intro_section(first_day, last_day):\n LOGGER.info(\"Generating post introduction section...\")\n section = (\n f\"# Weekly Top of Utopian.io: {first_day:%B} {first_day.day} - \"\n f\"{last_day:%B} {last_day.day}\"\n \"<br><br>[Introduction (summary of the week)]\"\n )\n return section", "def test_calibphd_def_format(self):\n \n message = \"begin ims1.0\\nmsg_type request\\nmsg_id ex013\\ne-mail foo.bar@google.com\\ntime 1999/01/01 to 2000/01/01\\ncalibphd \\nstop\"\n \n parser = IMSParser()\n \n result = parser.parse(message)\n \n # check mandatory fields\n self.assertEqual(result['MSGFORMAT'],'ims1.0')\n self.assertEqual(result['MSGTYPE'],'request')\n self.assertEqual(result['MSGID'],'ex013')\n self.assertEqual(result['TARGET'],'EMAIL')\n self.assertEqual(result['EMAILADDR'],'foo.bar@google.com')\n \n # optional for this request\n self.assertFalse(result.has_key('SOURCE'))\n\n # product_1\n self.assertTrue(result.has_key('PRODUCT_1'))\n \n # validate that there is a sta_list and a subtype\n self.assertEqual(result['PRODUCT_1'], {'STARTDATE': '1999/01/01', 'ENDDATE': '2000/01/01', 'TYPE': 'CALIBPHD'})", "def test_waveform_segment_request_1(self):\n \n message = \" begin ims1.0\\nmsg_type request\\nmsg_id ex002 any_ndc\\ne-mail john.doo@ndc.gov.tr\\ntime 1999/7/6 1:45 to 1999/7/6 2:00\\nbull_type idc_reb\\nrelative_to bulletin\\nwaveform ims2.0:cm6\\nstop\"\n \n parser = IMSParser()\n \n result = parser.parse(message)\n \n #print(\"\\nresult = %s\\n\" %(result))\n \n # check mandatory fields\n self.assertEqual(result['MSGFORMAT'],'ims1.0')\n self.assertEqual(result['MSGTYPE'],'request')\n self.assertEqual(result['MSGID'],'ex002')\n self.assertEqual(result['TARGET'],'EMAIL')\n self.assertEqual(result['EMAILADDR'],'john.doo@ndc.gov.tr')\n \n # optional for this request\n self.assertTrue(result.has_key('SOURCE'))\n \n # product_1\n self.assertTrue(result.has_key('PRODUCT_1'))\n \n # validate that there is a sta_list and a subtype\n self.assertEqual(result['PRODUCT_1'], {'STARTDATE': '1999/7/6 1:45', 'ENDDATE': '1999/7/6 2:00', 'FORMAT': 'ims2.0', 'RELATIVETO': 'bulletin', 'SUBFORMAT': 'cm6', 'BULLTYPE': 'idc_reb', 'TYPE': 'WAVEFORM'})", "def test_simple_request_message(self):\n \n message = \"begin ims1.0\\r\\nmsg_type request\\nmsg_id ex009 any_ndc \\ne-mail foo.bar.ssi@domain.name.de \\ntime 1999/06/13 to 1999/06/14 \\nbull_type idc_reb \\nbulletin ims1.0\\nstop\"\n \n parser = IMSParser()\n \n result = parser.parse(message)\n \n #print(\"\\nresult = %s\\n\" %(result))\n \n # check mandatory fields\n self.assertEqual(result['MSGFORMAT'],'ims1.0')\n self.assertEqual(result['MSGTYPE'],'request')\n self.assertEqual(result['MSGID'],'ex009')\n self.assertEqual(result['TARGET'],'EMAIL')\n 
self.assertEqual(result['EMAILADDR'],'foo.bar.ssi@domain.name.de')\n \n # optional for this request\n self.assertEqual(result['SOURCE'],'any_ndc')\n \n # product_1\n self.assertTrue(result.has_key('PRODUCT_1'))\n \n self.assertEqual(result['PRODUCT_1'], {'FORMAT': 'ims1.0', 'STARTDATE': '1999/06/13', 'BULLTYPE': 'idc_reb', 'ENDDATE': '1999/06/14', 'TYPE': 'BULLETIN'})", "def testSectionHeaders(self, b, u):\n i = 0\n while i < len(u):\n i = u.find(r'\\s', i)\n if i == -1:\n return\n c = u.find(r'\\c', i)\n if c == -1:\n return\n if c - i < 50:\n print('Misplaced Section Header against chapter in: ' + b)\n i = c", "def make_test_envelope() -> Envelope:\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n msg.to = \"any\"\n envelope = Envelope(\n to=\"any\",\n sender=\"any\",\n message=msg,\n )\n return envelope", "def start_section(self, section_number):\n if self.experiment is not None:\n self.experiment.section_prepare(section_number)", "def test__Attachment__precreate__0():\n attachment_id = 202211010000\n \n attachment = Attachment.precreate(attachment_id)\n _assert_fields_set(attachment)\n \n vampytest.assert_eq(attachment.id, attachment_id)", "def testReadFileHeader(self):\n output_writer = test_lib.TestOutputWriter()\n test_file = unified_logging.DSCFile(output_writer=output_writer)\n\n test_file_path = self._GetTestFilePath([\n 'uuidtext', 'dsc', '8E21CAB1DCF936B49F85CF860E6F34EC'])\n self._SkipIfPathNotExists(test_file_path)\n\n with open(test_file_path, 'rb') as file_object:\n test_file._ReadFileHeader(file_object)", "def test_clang_format_parser_line_start():\n cfp = ClangFormatXMLParser()\n data = \"\"\n offset = 0\n assert cfp.find_index_of_line_start(data, offset) == 0", "def test_handle_message_start(self):\n\n msg = Message(name='start', target='fake-id', origin='')\n newmsg = Message(name='start', target='fake-id_0', origin='fake-id')\n self.fexpr.state = 'ready'\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessage.return_value = newmsg\n result = self.fexpr.handle_message(self.ch, msg)\n self.assertEqual(result, 'consumed')\n self.assertEqual(self.fexpr.state, 'active')\n MockMessage.assert_called_once_with(name='start',\n target='fake-id_0',\n origin='fake-id')\n self.ch.send.assert_called_once_with(newmsg)", "def test_bcf_record_construct_minimal():\n header = bcf.BCFHeader()\n assert header.to_vcf_header() == '##fileformat=VCFv4.2\\n'", "def test_get_header_text_from_specs(self):\n\n # Empty the list (other unittests also use this list)\n self.gr_api.errorlog = []\n\n start = r\"^=Lorem ipsum dolor sit amet,\"\n end = r\"^=Lorem ipsum consetetur sadipscing elitr,\"\n header_start = r\"=Lorem ipsum dolor sit amet,\"\n header_end = r\"=Lorem ipsum consetetur sadipscing elitr,\"\n easy_specs = [\n [2,\n 'Specname1',\n u'=Lorem ipsum dolor sit amet,\\nconsetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum consetetur sadipscing elitr,\\n' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.'],\n [3,\n 'Specname2',\n u'=Lorem ipsum dolor sit amet,\\nconsetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum consetetur sadipscing elitr,\\n' +\\\n 
' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.']]\n\n section_lorem_ipsum = [\n [2,\n 'Specname1',\n 'consetetur sadipscing elitr, sed diam nonumy ' +\\\n 'eirmod tempor\\ninvidunt ut labore et dolore ' +\\\n 'magna aliquyam erat, sed diam\\nvoluptua.\\n'],\n [3,\n 'Specname2',\n 'consetetur sadipscing elitr, sed diam nonumy ' +\\\n 'eirmod tempor\\ninvidunt ut labore et dolore ' +\\\n 'magna aliquyam erat, sed diam\\nvoluptua.\\n']]\n\n req = MockRequest(self.gr_api.envs['task'])\n\n info = [easy_specs,\n start,\n end,\n header_start,\n header_end,\n req]\n\n self.assertEqual(\n self.gr_api.get_header_text_from_specs(info),\n section_lorem_ipsum,\n \"Extracted spec sections do not match!\")\n\n self.gr_api.errorlog = []\n start = r\"^=Lorem ipsum dolor sit amet,\"\n end = r\"^=Lorem ipsum consetetur sadipscing elitr,\"\n header_start = r\"=Lorem ipsum dolor sit amet,\"\n header_end = r\"=Lorem ipsum consetetur sadipscing elitr,\"\n easy_specs = [\n [2,\n 'Specname1',\n u'consetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum consetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.'],\n [3,\n 'Specname2',\n u'Lorem ipsum dolor sit amet,\\nconsetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum consetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.']]\n\n info = [easy_specs,\n start,\n end,\n header_start,\n header_end,\n req]\n\n self.gr_api.get_header_text_from_specs(info)\n\n errorlog = [(\"Cannot find the header in the spec text.\" +\\\n \" Please check spelling & special characters.\" +\\\n \" Regex engine will match the following header:\" +\\\n \" '=Lorem ipsum dolor sit amet,'\",\n 'http://example.org/Coconut/task/ticket/2',\n 'http://example.org/Coconut/event/wiki/Specname1'),\n (\"Cannot find the header in the spec text.\" +\\\n \" Please check spelling & special characters.\" +\\\n \" Regex engine will match the following header:\" +\\\n \" '=Lorem ipsum dolor sit amet,'\",\n 'http://example.org/Coconut/task/ticket/3',\n 'http://example.org/Coconut/event/wiki/Specname2')]\n\n self.assertEqual(self.gr_api.errorlog, errorlog,\n \"Errorlogs do not match!\")\n\n self.gr_api.errorlog = []\n start = r\"^=Lorem ipsum dolor sit amet,\"\n end = r\"^=Lorem ipsum consetetur sadipscing elitr,\"\n header_start = r\"=Lorem ipsum dolor sit amet,\"\n header_end = r\"=Lorem ipsum consetetur sadipscing elitr,\"\n easy_specs = [\n [2,\n 'Specname1',\n u'=Lorem ipsum dolor sit amet,\\nconsetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum\\n' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.'],\n [3,\n 'Specname2',\n u'=Lorem ipsum dolor sit amet,\\nconsetetur sadipscing elitr,' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore magna aliquyam erat, sed diam\\nvoluptua.\\n' +\\\n '=Lorem ipsum consetetur sadipscing elitr,\\n' +\\\n ' sed diam nonumy eirmod tempor\\ninvidunt ut labore et ' +\\\n 'dolore 
magna aliquyam erat, sed diam\\nvoluptua.']]\n\n info = [easy_specs,\n start,\n end,\n header_start,\n header_end,\n req]\n\n self.gr_api.get_header_text_from_specs(info)\n\n errorlog = [(\"Program is trying to match the spec text \" +\\\n \"between two headers. It CAN find the first \" +\\\n \"header but it CANNOT find the next header.\\n\\n\" +\\\n \"The next header defines the end of a section and\" +\\\n \" the beginning of a next section. Please check\" +\\\n \" spelling & special characters. Regex engine\" +\\\n \" could not find the following header:\" +\\\n \" '=Lorem ipsum consetetur sadipscing elitr,'\",\n 'http://example.org/Coconut/task/ticket/2',\n 'http://example.org/Coconut/event/wiki/Specname1')]\n\n self.assertEqual(self.gr_api.errorlog, errorlog,\n \"Errorlogs do not match.\")", "def generate_start_event() -> Event:\n start = {'concept:name': ARTIFICIAL_START,\n 'lifecycle:transition': 'complete',\n 'time:timestamp':\n datetime.datetime(1, 1, 1, 0, 0,\n tzinfo=datetime.timezone(\n datetime.timedelta(seconds=3600)))\n }\n\n return Event(start)", "def test_section__end_tag_with_no_start_tag(self):\n template = '{{/section}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: section != None\")", "def test_simple(self):\n c = \"\"\"\n int main() {\n #pragma omp sections\n {\n }\n }\n \"\"\"\n ast = self.parser.parse(c)\n child = ast.ext[0].body.block_items[1]\n pv = self.PragmaVisitor()\n ov = self.OmpSectionsVisitor()\n\n ast = self.transform.visit(ast)\n\n ov.visit(ast)\n pv.visit(ast)\n\n self.assertEqual(0, len(pv.nodes))\n self.assertEqual(1, len(ov.nodes))\n self.assertEqual(child, ov.nodes[0].sections)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the text for this menu item.
def text(self) -> "str":  # type: ignore
    if callable(self.text_generator):
        text = self.text_generator()
    else:
        text = self.text_generator
    if self.toggler is not None:
        text += " ✓" if self.toggler() else ""
    # Check if this menu item should be disabled, and if so, remove the handler
    self.handler = None if self.disabled else self._handler
    return text
[ "def _get_text(self) -> \"std::string\" :\n return _core.RadialMarkingMenu__get_text(self)", "def text(self) -> str:\n attr_text = self.separator.join(self.values)\n return f'{self.name}=\"{attr_text}\"'", "def get_item_text(self, widget, index):\n return widget.GetString(index)", "def text(self):", "def __str__(self):\n output = f\"This choice involves selecting {self.type} for {self.name}.\\n\" \\\n f\"It takes the suboptions {self.elements}\"\n return output", "def __str__(self):\n return 'text \"%s\" at (%g,%g)' % (self._text, self._position.x, self._position.y)", "def intro_text(self):\n # initial description\n self.start_supplies = \"\"\"\n\n \"\"\"\n # description after supplies are added\n self.no_supplies = \"No supplies left at this location\"\n # define the descriptive text for the supply text\n supply_text = [self.start_supplies, self.no_supplies]\n # switch messages after the supplies are added\n if self.i == 0:\n self.i += 1\n return supply_text[0]\n else:\n return supply_text[1]", "def get_menu_item_name(self):\n return self.menu_item_name", "def __str__(self):\n text = \"Recipe for: \" + self.name + \"\\nIt's a level \"+str(self.cooking_lvl)+\" recipe that takes \"+str(self.cooking_time)+\"min to prepare.\\n\"\n text = text + \"The ingredient list is :\" + str(self.ingredients) + \"\\nRecipe Description:\\n\" + self.description + \"\\nIt's a \" + self.type\n return text", "def GetText(self):", "def label_text(self):\n raise NotImplementedError()", "def rcmenu_item():\n yield keyword(\"menuitem|separator|submenu\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n label = yield quoted\n yield normalspaces()\n vnarg = yield sepBy(named_argument, singlelinespaces())\n return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)", "def get_items(self):\r\n options = \"\"\r\n for item in self.menu:\r\n options += f\"{item.name} ${item.cost:.2f} | \"\r\n return options", "def get_text(self, instance):\n return instance.selftext if instance.is_self else None", "def __str__(self):\n text = (self.amount + ' ' + (self.measure + ' ' + self.ingredient).strip()).strip()\n if preparation_method:\n text += ' -- ' + preparation_method\n return text", "def get_text(self):\r\n\t\treturn self.text", "def get_text(self, widget):\n return widget.GetLabel()", "def get_text(self):\n return self.widget.GetValue()", "def __create_item(self) -> str:\n flattened_outcomes = Quick_Python.flatten(self.outcomes)\n outcome = random.sample(flattened_outcomes, 1)[0]\n if isinstance(outcome, dict):\n outcome, amount = list(outcome.items())[0]\n print(f\"Crafting {outcome}: Replacing amount {self.amount} with {amount}\")\n self.amount = amount\n return \" \".join(item for item in [self.prefix, outcome] if item)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats a command from the base command with class variables and adds them to the batches' command list
def format_command(self, unique_item=None):
    inserts = {}
    if '{exe}' in self.command_base:
        inserts["exe"] = self.executable
    if '{out}' in self.command_base:
        inserts["out"] = '{out}'
    if '{mod}' in self.command_base:
        inserts["mod"] = self.model_path
    if '{in}' in self.command_base:
        inserts["in"] = os.path.join(self.model_path, 'in')
    if '{unique}' in self.command_base:
        inserts["unique"] = unique_item
    if '{cpus}' in self.command_base:
        inserts["cpus"] = self.cpus
    self.commands.append(self.command_base.format(**inserts))
[ "def getCmdString(self,cmd):\n if hasattr(cmd,\"command\") and isinstance(cmd.command, Command):\n cmd.command = cmd.command.composeCmdString()\n return super(self.__class__,self).getCmdString(cmd)\n elif isinstance(cmd,list):\n cmdarr = []\n for c in cmd:\n if hasattr(c,\"command\") and isinstance(c.command, Command):\n c.command = c.command.composeCmdString()\n cmdarr.append(c.command.composeCmdString())\n cmdarr.append(super(self.__class__,self).getCmdString(cmd))\n return \"\\n\".join(cmdarr)\n else:\n return super(self.__class__,self).getCmdString(cmd)", "def preprocess_commands(self):\n expanded_commands = []\n\n for command in self.commands: # Look through each command...\n operation_added = False\n for operation in command['do']: # Look through each action...\n # Check if this action is an add operation, and if it's adding more than 10 products.\n if operation.get('add') and len(operation['add']['product']) > 10:\n # We need to split the groups into batches of 10 and create a new command for each batch.\n for groups in [operation['add']['product'][i:i + 10]\n for i in xrange(0, len(operation['add']['product']), 10)]:\n # Create a copy of the current command.\n new_command = dict(command)\n new_do_operations = []\n for old_operation in command['do']: # Work through all the operations in the command\n if old_operation.get('add'): # We want to replace add operations but leave others\n new_do_operations.append({'add': {'product': groups}})\n elif old_operation.get('remove'): # Except remove operations which we'll handle below\n continue\n else:\n new_do_operations.append(old_operation)\n new_command['do'] = new_do_operations\n expanded_commands.append(new_command)\n operation_added = True # Set a flag to show something changed.\n\n # Same again, but this time with remove operations.\n elif operation.get('remove') and len(operation['remove']['product']) > 10:\n for groups in [operation['remove']['product'][i:i + 10]\n for i in xrange(0, len(operation['remove']['product']), 10)]:\n new_command = dict(command)\n new_do_operations = []\n for old_operation in command['do']:\n if old_operation.get('remove'):\n new_do_operations.append({'remove': {'product': groups}})\n elif old_operation.get('add'):\n continue\n else:\n new_do_operations.append(old_operation)\n new_command['do'] = new_do_operations\n expanded_commands.append(new_command)\n operation_added = True\n\n # This is an unusual constuct for...else - the else block gets executed once the for loop has\n # completed.\n else:\n if not operation_added:\n # Add the command if it didn't contain a add or remove block with more than 10 groups.\n expanded_commands.append(command)\n\n # Return the expanded_commands list chunked into lists of 10 commands.\n return [expanded_commands[i:i + 10] for i in xrange(0, len(expanded_commands), 10)]", "def create_conversion_commands(self):\n\n for shot in self.shots.values():\n shot[\"ffmpeg_commands\"] = []\n for repr_name, repr_data in self.representation_metadata.items():\n ffmpeg_template = repr_data.get(\"ffmpeg_template\")\n if ffmpeg_template:\n ffmpeg_data = {\n \"infile\": shot[\"movie_file\"][\"path\"],\n \"outfile\": shot[repr_name][\"path\"],\n }\n if repr_name == \"image_sequence\":\n ffmpeg_data[\"frame_rate\"] = shot[\"frame_rate\"]\n\n ffmpeg_string = ffmpeg_template.format(**ffmpeg_data)\n ffmpeg_command = ffmpeg_string.split(\" \")\n shot[\"ffmpeg_commands\"].append(ffmpeg_command)", "def buildcommand(self, mofullname, moparent, attr):\n\n cmd = ''\n objectvarname = None\n\n 
mopackagename, moclassname = self.resolvemoname(mofullname)\n\n if moclassname.startswith('Rt'):\n # skipping object that is a relationship target (Rt), since they are\n # not going to contain any useful configuration we need to copy\n pass\n else:\n attribstr = self.buildattributestring(attr)\n\n cobrapackagename = 'cobra.model.%s' % mopackagename\n if cobrapackagename not in self.importlist:\n self.importlist.append(cobrapackagename)\n\n objectvarname = self.getvarname(mofullname)\n if mofullname == 'polUni':\n return '', moparent\n\n parms = [moparent]\n if attribstr != '':\n parms.append(attribstr)\n\n cmd += '%s = cobra.model.%s.%s(%s)\\n' % (\n objectvarname, mopackagename, moclassname, ', '.join(parms))\n if attr.get('status', '') == 'deleted':\n cmd += '%s.delete()\\n' % objectvarname\n\n self.objectcounter += 1\n return cmd, objectvarname", "def at_cmdset_creation(self):\r\n self.add(Command())", "def submit_batch(self, command):\n pass", "def command(self, *args, **kwargs):\n kwargs['cls'] = CustomCommand\n return super().command(*args, **kwargs)", "def buildcommands(self) -> str:\n commands = []\n for line in self.command_terms:\n if len(line) == 0:\n continue\n command = DM()\n for term in line:\n if isinstance(term, (int, float)):\n command.append('term', DM([('parameter', term)]))\n else:\n command.append('term', DM([('option', str(term))]))\n commands.append(command)\n return commands", "def prepare_command(self, title, cmd):\n return cmd", "def buildCommands(self):\n SimpleLogger.outputVerbose(\n [\"Command: \", self.commandTemplateString], \n [\"Arguments: \"+Formatter.ListAsString(self.argumentLists, True)]\n )\n #Use every combinations of the argument lists\n if self.runAllArgumentCombinations:\n SimpleLogger.outputVerbose(\"Using combinatation arguments\")\n return CommandBuilder.CreateCommandsFromAllArgumentCombinations(self.commandTemplateString, self.argumentLists)\n # Use the nth item in each list for the nth command executed\n else:\n SimpleLogger.outputVerbose(\"Using indexed arguments\")\n return CommandBuilder.CreateCommandsFromDistinctArgumentIndices(self.commandTemplateString, self.argumentLists)", "def __init__(self, command: str):\n self.command = command", "def transform(self, cmd):\n return cmd", "def _formatCmd( self, cmd ):\n if self.cmd is None:\n msgFmt = \"Unable to execute commands in subshell because shell\" \\\n \" functionality not implemented for OS %s\" \\\n \" Failed command=%s\"\n raise OSError( 0, msgFmt % ( os._name, cmd ))\n\n if isinstance(cmd, basestring):\n shellCmd = self.cmd + [cmd]\n else:\n shellCmd = cmd\n\n return shellCmd", "def _formatCmd( self, cmd ):\r\n if self.cmd is None:\r\n msgFmt = \"Unable to execute commands in subshell because shell\" \\\r\n \" functionality not implemented for OS %s\" \\\r\n \" Failed command=%s\"\r\n raise OSError( 0, msgFmt % ( os._name, cmd ))\r\n\r\n if isinstance(cmd, basestring):\r\n shellCmd = self.cmd + [cmd]\r\n else:\r\n shellCmd = cmd\r\n\r\n return shellCmd", "def generate_hooked_command(cmd_name, cmd_cls, hooks):\n\n def run(self, orig_run=cmd_cls.run):\n self.run_command_hooks('pre_hooks')\n orig_run(self)\n self.run_command_hooks('post_hooks')\n\n return type(cmd_name, (cmd_cls, object),\n {'run': run, 'run_command_hooks': run_command_hooks,\n 'pre_hooks': hooks.get('pre', []),\n 'post_hooks': hooks.get('post', [])})", "def composeCmdString(self):\n if hasattr(self,\"cmdstring\"):\n print \"cmdstring is %s\" % self.cmdstring\n return self.cmdstring\n cmdstring = \"\"\n if 
hasattr(self,\"cmdarray\") and len(self.cmdarray) > 0:\n cmdstring += \" \".join(self.cmdarray)\n if hasattr(self,\"cmdparametervalues\"):\n if not hasattr(self,\"parameterdefs\"):\n for k,v in self.cmdparametervalues.iteritems():\n if not k.startswith(\"-\"):\n if len(k) == 1:\n k = \"-\" + k\n else:\n k = \"--\" + k\n if v == False:\n continue\n if v == True:\n cmdstring += \" %s\" % k\n else:\n cmdstring += \" %s=%s\" % (k,v)\n else:\n # This is the branch for commands defined by parameter defs\n # Tool name should be in the \"bin\" attribute \n if hasattr(self,\"bin\"):\n cmdstring = self.bin\n else:\n raise Exception(\"Specified command must have a 'bin' attribute.\")\n \n # Determines if the argument pattern is an optional one\n optionalargre = re.compile(\"\\?.+?\\?\")\n \n # Determines if the argument pattern has quoting of the <VALUE>\n quotecheckre = re.compile(\"(\\S)<VALUE>(\\S)\") \n \n # Go through the parameter defs in order and \n # for any parameter with a value, substitute the value into the \n # \"pattern\"\n \n # Sort the parameterdef keys based on pdef.order\n sortednames = sorted(self.parameterdefs.iterkeys(),key=lambda name: int(self.parameterdefs[name].order))\n \n for pname in sortednames:\n pdef = self.parameterdefs[pname]\n if pname in self.cmdparametervalues:\n value = self.cmdparametervalues[pname]\n \n if value == False:\n continue\n \n # If <VALUE> is surrounded by something (e.g. single quotes)\n # then we should make sure that char is escaped in the value\n quotestring = None\n match = quotecheckre.search(pdef.pattern)\n if match is not None:\n if len(match.groups()) == 2:\n if match.group(1) == match.group(2):\n quotestring = match.group(1)\n \n # Do some courtesy escaping\n if isinstance(value,basestring) and quotestring is not None:\n # Remove existing escapes\n value = value.replace(\"\\\\\" + quotestring,quotestring)\n # Escape the quote\n value = value.replace(quotestring,\"\\\\\" + quotestring)\n \n \n # Substitute the value into the pattern\n if optionalargre.search(pdef.pattern) is not None:\n \n # This is the case of a switch with an optional argument\n if value == True:\n # Adding the switch with no argument\n cmdstring += \" %s\" % optionalargre.sub(\"\",pdef.pattern)\n else:\n # Remove the question marks and substitute the VALUE\n cmdstring += \" %s\" % pdef.pattern.replace(\"?\",\"\").replace(\"<VALUE>\",value)\n \n else:\n if value == True:\n cmdstring += \" %s\" % pdef.pattern\n else:\n cmdstring += \" %s\" % pdef.pattern.replace(\"<VALUE>\",value)\n \n return cmdstring.encode('ascii','ignore')", "def build_command(self, device_dict, command_tuple):\n command = \" \" # The final command which should be send in the end\n return_list = [] # Is list of commands which can be returned if need be\n only_command = False # Flag if only a command was passed, important if such a command doesnt need syntax!\n\n if type(command_tuple) == unicode or type(command_tuple)== str or type(command_tuple)== float or type(command_tuple)== int:\n command_tuple = (str(command_tuple),\"\") # so only tuple are now prevelent\n only_command = True\n elif type(command_tuple[1]) == list:\n command_tuple = (command_tuple[0], [str(x) for x in command_tuple[1]]) # so no unicode is present\n\n # Preparations\n # look for a syntax (paranteses and so on)\n if \"syntax\" in device_dict:\n syntax = str(device_dict[\"syntax\"])\n syntax = syntax.split(\"###\")\n if not syntax[0]:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n else:\n syntax = 
[\"\",\"\"] # Most devices have no paranteses or whatsoever\n\n #Looks if a separator is needed to sepatare mulitple orders\n if \"separator\" in device_dict:\n sepa = str(device_dict[\"separator\"])\n else:\n sepa = \" \" # This should be the standard for most devices\n\n\n if command_tuple[0] in device_dict:\n # here all the magic happens\n # First look if the order is swichted or not (command value, or value command)\n\n # Check if multiple commands so list or so\n if type(device_dict[command_tuple[0]]) == str or type(device_dict[command_tuple[0]]) == unicode:\n command_list = [device_dict[command_tuple[0]]]\n else:\n command_list = device_dict[command_tuple[0]]\n\n for command_item in command_list:\n command_item = str(command_item)\n command = \"\"\n\n # Value -> Command\n if int(device_dict.get(\"command_order\", 1)) == -1:\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\"_\") # finds the index of the command, to search for\n if \"CSV\" + command_tuple[0][start_ind:] in device_dict: # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\"CSV\" + str(command_tuple[0])[start_ind:]]\n csv_commands = csv_commands.strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip() # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\",\") # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if type(command_tuple[1]) == list or type(command_tuple[1]) == tuple:\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == unicode:\n value_list = command_tuple[1].strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip().replace(\" \", \"\")\n value_list = value_list.split(\",\")\n\n csv_list = \",\".join(map(str,value_list)).strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip()\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa\n\n if i+1 < len(csv_commands) and len(csv_commands)>1:\n for j in range(i+1, len(csv_commands)): # Fill the rest of the missing paramters\n print \"Warning: Not enough parameters passed for function: \" + str(command_item) + \" the command must consist of \" + str(csv_commands) + \" '\" + str(csv_commands[j]) + \"' is missing! Inserted 0 instead.\"\n l.error(\"Warning: Not enough parameters passed for function: \" + str(command_item) + \" the command must consist of \" + str(csv_commands) + \" '\" + str(csv_commands[j]) + \"' is missing! 
Inserted 0 instead.\")\n command += \"0\" + sepa\n\n command = command.strip(\" \").strip(\",\") # to get rid of last comma\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if type(command_tuple[1]) == list or type(command_tuple[1]) == tuple:\n string = \"\"\n for item in command_tuple[1]:\n command = syntax[1] + str(item) + \" \" + command_item\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If only a command was passed\n string = str(command_tuple[1])\n command += syntax[1] + str(string).strip()\n\n if only_command and device_dict.get(\"no_syntax_with_single_commmand\", False) and syntax[1]!= \" \" and syntax[0]!= \" \":\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n #command += \" \" + str(device_dict[str(command_item)]).strip() + syntax[0] # adds the order to the command\n command += \" \" + str(command_item).strip() + syntax[0] # adds the order to the command\n # Add a command terminator if one is needed and the last part of the syntax\n command = command.strip()\n command += device_dict.get(\"execution_terminator\", \"\")\n #command += syntax[0] # adds the order to the command\n return_list.append(command)\n\n #Command -> Value\n else:\n command += str(command_item).strip() + \" \" + syntax[0] # adds the order to the command\n\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\"_\") # finds the index of the command, to search for\n if \"CSV\" + command_tuple[0][start_ind:] in device_dict: # looks if an actual csv-command is there\n #Todo: test CSV command\n csv_commands = device_dict[\"CSV\" + str(command_tuple[0])[start_ind:]]\n csv_commands = csv_commands.strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip() # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\",\") # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if type(command_tuple[1]) == list or type(command_tuple[1]) == tuple:\n value_list = command_tuple[1]\n elif type(command_tuple[1])==str or type(command_tuple)==unicode:\n value_list = command_tuple[1].strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip().replace(\" \", \"\")\n value_list = value_list.split(\",\")\n\n\n csv_list = \",\".join(map(str,value_list)).strip().strip(\"(\").strip(\")\").strip(\"[\").strip(\"]\").strip()\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa + \" \"\n\n if i+1 < len(csv_commands) and len(csv_commands)>1:\n for j in range(i+1, len(csv_commands)):# Fill the rest of the missing paramters\n print \"Warning: Not enough parameters passed for function: \" + str(command_item) + \" the command must consist of \" + str(csv_commands) + \" '\" + str(csv_commands[j]) + \"' is missing! Inserted 0 instead.\"\n l.error(\"Warning: Not enough parameters passed for function: \" + str(command_tuple[0]) + \" the command must consist of \" + str(csv_commands) + \" '\" + str(csv_commands[j]) + \"' is missing! 
Inserted 0 instead.\")\n command += \" \" + \"0\" + sepa\n\n command = command.strip(\" \").strip(\",\") # to get rid of last comma and space at the end if csv\n command += syntax[1]\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if type(command_tuple[1]) == list or type(command_tuple[1]) == tuple:\n string = \"\"\n for item in command_tuple[1]:\n command = str(item) + \" \" + command_item + syntax[1]\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If its just one value or no value\n string = str(command_tuple[1])\n command += string.strip() + syntax[1]\n command = command.strip()\n\n if only_command and device_dict.get(\"no_syntax_with_single_commmand\", False) and syntax[1]!= \" \" and syntax[0]!= \" \":\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command.strip())\n else:\n # If the command is not found in the device only command tuple will be send\n print \"Command \" + str(command_tuple[0]) + \" was not found in device! Unpredictable behavior may happen. No commad build!\"\n l.error(\"Command \" + str(command_tuple[0]) + \" was not found in device! Unpredictable behavior may happen. No commad build!\")\n return \"\"\n\n # Add a command terminator if one is needed and the last part of the syntax\n #command += device_dict.get(\"execution_terminator\",\"\")\n\n\n\n # Todo: multiple commands return\n if len(return_list) > 1:\n return return_list\n else:\n return str(return_list[0])", "def _class_name_to_command(self):\r\n\r\n command = []\r\n for i in range(len(self.__class__.__name__)):\r\n c = self.__class__.__name__[i]\r\n if i == 0:\r\n command.append(c.lower())\r\n elif i > 0 and c.isupper():\r\n command.append('_')\r\n command.append(c.lower())\r\n else:\r\n command.append(c)\r\n\r\n return ''.join(command)", "def add_cmd(cls, session, command):\n cmd = cls(\n start_time=command[\"Start\"],\n end_time=command[\"End\"],\n success=command[\"Success\"],\n target_id=command[\"Target\"],\n plugin_key=command[\"PluginKey\"],\n modified_command=command[\"ModifiedCommand\"].strip(),\n original_command=command[\"OriginalCommand\"].strip(),\n )\n session.add(cmd)\n session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the path to the desired URLs dataset
def build_urls_path(dataset):
    this_file = os.path.abspath(__file__)
    this_folder = os.path.dirname(this_file)
    datasets_path = pathlib.Path(this_folder) / ".." / 'datasets'
    if dataset == 'inventory':
        return datasets_path / 'inv_urls.csv'
    if dataset == 'repatriation':
        return datasets_path / 'repatriation_urls.csv'
[ "def build_dataset_url(app, uuid, basename, aset, extension):\n return '/apps/%s/datasets/%s/%s.%s.%s' % (app, uuid, basename, aset, extension)", "def _generate_urls(self):\n if self.ssl is True:\n self.schema = \"https\"\n else:\n self.schema = \"http\"\n self.read_url = \"{0}://{1}:{2}/api/v1/datapoints/query\".format(self.schema, self.server, self.port)\n self.read_tag_url = \"{0}://{1}:{2}/api/v1/datapoints/query/tags\".format(self.schema, self.server, self.port)\n self.write_url = \"{0}://{1}:{2}/api/v1/datapoints\".format(self.schema, self.server, self.port)\n self.delete_dps_url = \"{0}://{1}:{2}/api/v1/datapoints/delete\".format(self.schema, self.server, self.port)\n self.delete_metric_url = \"{0}://{1}:{2}/api/v1/metric/\".format(self.schema, self.server, self.port)", "def _fix_dataset_urls():\n for dataset in get_project_datasets():\n dataset.url = dataset._id\n for file_ in dataset.files:\n if file_.url:\n file_.url = url_to_string(file_.url)\n\n dataset.to_yaml()", "def _get_create_data_set_url(self):\n return self.api_url+'/data_sets/create_dataset'", "def get_dataset_url(self, name):\n url = self.generate_url(name)\n page = requests.get(url, headers=self.headers)\n tree = html.fromstring(page.text)\n rows = tree.xpath('//table[@id=\"data-files\"]//tbody')\n list_datasets = []\n for row in rows:\n filename = get_last(row.xpath('.//td[@class=\"file-name\"]/text()'))\n for link in row.xpath('.//td[2]//a'):\n dataset = {}\n dataset['competition_name'] = name\n dataset['filename'] = filename\n dataset['url'] = base_url + get_last(link.xpath('./@href'))\n dataset['name'] = get_last(link.xpath('./@name'))\n dataset['size'] = regex_size.search(get_last(link.xpath('./text()'))).group(1)\n list_datasets.append(dataset)\n return list_datasets", "def get_dataset_path(base_dir, setname='train'):\n\n import os\n if setname == 'train':\n lidar_dir = os.path.join(base_dir, 'data_depth_velodyne', 'train')\n depth_dir = os.path.join(base_dir, 'data_depth_annotated', 'train')\n rgb_dir = os.path.join(base_dir, 'raw')\n elif setname == 'val':\n lidar_dir = os.path.join(base_dir, 'data_depth_velodyne', 'val')\n depth_dir = os.path.join(base_dir, 'data_depth_annotated', 'val')\n rgb_dir = os.path.join(base_dir, 'raw')\n elif setname == 'selval':\n lidar_dir = os.path.join(base_dir, 'val_selection_cropped', 'velodyne_raw')\n depth_dir = os.path.join(base_dir, 'val_selection_cropped', 'groundtruth_depth')\n rgb_dir = os.path.join(base_dir, 'val_selection_cropped', 'image')\n elif setname == 'test':\n lidar_dir = os.path.join(base_dir, 'test_depth_completion_anonymous', 'velodyne_raw')\n depth_dir = os.path.join(base_dir, 'test_depth_completion_anonymous', 'velodyne_raw')\n rgb_dir = os.path.join(base_dir, 'test_depth_completion_anonymous', 'image')\n else:\n raise ValueError(\"Unrecognized setname \"+str(setname))\n\n return lidar_dir, depth_dir, rgb_dir", "async def initial_path_for_datasette(datasette):\n databases = dict([p for p in datasette.databases.items() if p[0] != \"_internal\"])\n if len(databases) == 1:\n db_name = next(iter(databases.keys()))\n path = datasette.urls.database(db_name)\n # Does this DB only have one table?\n db = next(iter(databases.values()))\n tables = await db.table_names()\n if len(tables) == 1:\n path = datasette.urls.table(db_name, tables[0])\n else:\n path = datasette.urls.instance()\n return path", "def get_dataset_file_path() -> str:\n name = __DATASET_FILE__\n return os.path.join(get_project_root(), __COLOR_DATASET_FOLDER__ , name)", "def 
_download_datasets():\n def filepath(*args):\n return abspath(join(dirname(__file__), *args))\n for name in DATASETS_TO_DOWNLOAD:\n data = Dataset(name)\n url = data.url\n filename = filepath(data.filename)\n print(\"retrieving data {0} -> {1}\".format(url, filename))\n urlretrieve(url, filename)\n with open(filepath('listing.txt'), 'w') as f:\n f.write('\\n'.join(DATASETS_TO_DOWNLOAD) + '\\n')", "def data_urls(self):\n return [dinfo.data_url for dinfo in self.datainfo]", "def defineDataPaths():\n dataPaths = list()\n #dataPaths.append(\"./data\")\n dataPaths.append(\"./Data/Datasample_udacity\")\n #dataPaths.append(\"./01_Data/Track_1_back\")\n #dataPaths.append(\"./01_Data/Track_1_forw\")\n #dataPaths.append(\"./01_Data/Track_1_rec\")\n return dataPaths", "def __get_dataset_name(self):\n url_path = urlparse(self.dataset_url).path\n return os.path.basename(url_path)", "def build_paths(self) -> None:\n self.filepath = self.config['input_file']\n self.config['sub_dir'] = os.path.basename(self.filepath).split('.')[0]\n path = os.path.normpath(os.path.join(\n self.config['output_dir'],\n self.config['sub_dir'],\n ))\n self.config['path'] = path\n for file_type in ['train', 'test', 'val']:\n self.config[f'{file_type}_file'] = os.path.join(path, f'{file_type}.csv')", "def rei_url_helper(*, build_url, config, **_):\n # initiate url list for coa cropland data\n urls = []\n # replace \"__xlsx_name__\" in build_url to create three urls\n for x in config['files']:\n url = build_url\n url = url.replace(\"__filename__\", x)\n urls.append(url)\n return urls", "def _path(filename):\n fn = os.path.join(\"datasets\", filename)\n return [fn] if os.path.exists(fn) else []", "def _do_get_training_dataset_path(training_dataset, featurestore_metadata, training_dataset_version=1):\n training_datasets = featurestore_metadata[constants.REST_CONFIG.JSON_TRAINING_DATASETS]\n training_dataset_json = _find_training_dataset(training_datasets, training_dataset, training_dataset_version)\n hdfs_path = training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_HDFS_STORE_PATH] + \\\n constants.DELIMITERS.SLASH_DELIMITER + training_dataset_json[\n constants.REST_CONFIG.JSON_TRAINING_DATASET_NAME]\n data_format = training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_FORMAT]\n if data_format == constants.FEATURE_STORE.TRAINING_DATASET_NPY_FORMAT:\n hdfs_path = hdfs_path + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX\n if data_format == constants.FEATURE_STORE.TRAINING_DATASET_HDF5_FORMAT:\n hdfs_path = hdfs_path + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX\n if data_format == constants.FEATURE_STORE.TRAINING_DATASET_IMAGE_FORMAT:\n hdfs_path = training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_HDFS_STORE_PATH]\n # abspath means \"hdfs://namenode:port/ is preprended\n abspath = pydoop.path.abspath(hdfs_path)\n return abspath", "def get_dataset_path(path, annotation, image_dir):\n if _dataset_exists(path, annotation, image_dir):\n return path\n\n data_name = os.path.split(path.strip().lower())[-1]\n if data_name not in DOWNLOAD_DATASETS_LIST:\n raise ValueError(\n \"Dataset {} is not valid for reason above, please check again.\".\n format(osp.realpath(path)))\n else:\n logger.warning(\n \"Dataset {} is not valid for reason above, try searching {} or \"\n \"downloading dataset...\".format(osp.realpath(path), DATASET_HOME))\n\n for name, dataset in DATASETS.items():\n if data_name == name:\n logger.debug(\"Parse dataset_dir {} as dataset \"\n \"{}\".format(path, name))\n 
data_dir = osp.join(DATASET_HOME, name)\n\n if name == \"spine_coco\":\n if _dataset_exists(data_dir, annotation, image_dir):\n return data_dir\n\n # For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007\n if name in ['voc', 'fruit', 'roadsign_voc']:\n exists = True\n for sub_dir in dataset[1]:\n check_dir = osp.join(data_dir, sub_dir)\n if osp.exists(check_dir):\n logger.info(\"Found {}\".format(check_dir))\n else:\n exists = False\n if exists:\n return data_dir\n\n # voc exist is checked above, voc is not exist here\n check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'\n for url, md5sum in dataset[0]:\n get_path(url, data_dir, md5sum, check_exist)\n\n # voc should create list after download\n if name == 'voc':\n create_voc_list(data_dir)\n return data_dir\n\n raise ValueError(\"Dataset automaticly downloading Error.\")", "def get_scraping_output_paths(site):\n site_py = site.replace('.', '_')\n raw_data_path = Path('data', 'raw')\n feed_path = Path(raw_data_path, site_py, 'items.csv')\n images_path = Path(raw_data_path, site_py, 'images')\n return str(feed_path), str(images_path)", "def _get_all_url(cls) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts a data retrieval job with the desired function, set of urls, and keys
def start_parallel_job(func, urls, keys):
    job_input = list(zip(urls, keys))
    job_workers = multiprocessing.cpu_count() * 2
    job_chunksize = len(job_input) // job_workers
    with multiprocessing.Pool(job_workers) as p:
        p.starmap(func, job_input, job_chunksize)
[ "def run_job():", "def _worker_fn(url, dataset_fn, sampler_fn):\n dataset = dataset_fn(url)\n sampler = sampler_fn(dataset)\n return (dataset, sampler)", "async def fetch_policy_data(self, urls: Dict[str, FetcherConfig] = None) -> Dict[str, Any]:\n # return value - /fetch-results mapped by url\n results_by_url = {}\n # tasks\n tasks = []\n # if no url provided - default to the builtin route\n if urls is None:\n urls = {self._data_url: self._default_fetcher_config}\n # create a task for each url\n for url, config in urls.items():\n tasks.append(self._handle_url(url, config))\n # wait for all data fetches to complete\n results = await asyncio.gather(*tasks, return_exceptions=True)\n\n # Map results by urls\n results_by_url = {}\n for result in results:\n if not isinstance(result, Exception):\n url, response = result\n results_by_url[url] = response\n # return results\n return results_by_url", "def run_multiproccesing():\n\n print('--- Multiprocessing ---')\n with Pool(5) as executor:\n return list(executor.map(load_url, URLS))", "def main():\n #Get number of pages\n\n number_of_pages = get_number_of_pages()\n pages=list(np.arange(1,number_of_pages+1))\n timer_utils=TimerUtils()\n timer_utils.start(f'Start getting products information of {len(pages)} pages')\n\n # Split to different = number of process\n buckets = np.array_split(pages, NUMBER_OF_PROCESS)\n pool =Pool(NUMBER_OF_PROCESS)\n jobs=[]\n index\t=0\n\n now=dt.now()\n dt_string=now.strftime(\"%d/%m/%Y %H:%M:%S\")\n logging.info(f'Start\t{NUMBER_OF_PROCESS} workers\tat {dt_string}')\n\n #Create a global variable.\n while index\t<\tlen(buckets):\n process_id\t=index\n pages\t= buckets[index]\n process\t=pool.apply_async(get_products_df,\targs=(process_id,\tpages,))\n jobs.append(process)\n index\t+=1\n\n \t#C1ose the pool\n pool.close()\n\n \t#wait\tuntil\tfinishing\tall\tprocess\n results=[job.get() for job in\tjobs]\n timer_utils.stop(f'End getting products\tinformation\tof total {len(pages)} pages')", "def parallel_fetch(urllist: list, \n nodelist: list, \n cores: int,\n username: str, \n password:str):\n \n flatten_metrics = []\n try:\n # Partition\n urls_group = partition(urllist, cores)\n nodes_group = partition(nodelist, cores)\n\n fetch_args = []\n for i in range(cores):\n urls = urls_group[i]\n nodes = nodes_group[i]\n fetch_args.append((urls, nodes, username, password))\n\n with multiprocessing.Pool() as pool:\n metrics = pool.starmap(fetch, fetch_args)\n\n flatten_metrics = [item for sublist in metrics for item in sublist]\n except Exception as err:\n log.error(f\"Cannot parallel fetch data from idrac urls: {err}\")\n\n return flatten_metrics", "def run_single_thread():\n print('--- Single thread ---')\n return [load_url(x) for x in URLS]", "def _launch_query(id, download_dir, lkwargs):\n\n print(f'Querying MAST for {id}')\n search_results = lk.search_lightcurvefile(target=id, **lkwargs)\n if len(search_results) == 0:\n warnings.warn('LightKurve did not return %s cadence data for %s' % (lkwargs['cadence'], id))\n return []\n else:\n return search_results.download_all(download_dir=download_dir)", "def get_json_and_pdfs():\n\n # The function that downloads the JSON files\n # get_json.loop_dataframe()\n get_json.thread_download_json()\n\n # The function that extracts the proper arguments to pass to the function for downloading PDFs using multiprocessing.\n # That function requires a list of tuples, each tuple being a seperate set of arguments to pass.\n link_list = get_pdfs.get_urls(\"json-output\")\n\n # This function uses 
threading on the function that downloads PDFs, allowing us to download multiple PDFs at once,\n # speeding up the process.\n get_pdfs.thread_download_pdfs(link_list)", "def retrieve(queues, traces, args): # noqa: C901\n\n timefloor = infosys.queuedata.timefloor\n starttime = time.time()\n\n jobnumber = 0 # number of downloaded jobs\n getjob_requests = 0\n getjob_failures = 0\n print_node_info()\n\n while not args.graceful_stop.is_set():\n\n time.sleep(0.5)\n getjob_requests += 1\n\n if not proceed_with_getjob(timefloor, starttime, jobnumber, getjob_requests, args.getjob_requests,\n args.update_server, args.harvester_submitmode, args.harvester, args.verify_proxy, traces):\n # do not set graceful stop if pilot has not finished sending the final job update\n # i.e. wait until SERVER_UPDATE is DONE_FINAL\n check_for_final_server_update(args.update_server)\n args.graceful_stop.set()\n break\n\n # store time stamp\n time_pre_getjob = time.time()\n\n # get a job definition from a source (file or server)\n res = get_job_definition(args)\n logger.info('job definition = %s', str(res))\n\n if res is None:\n logger.fatal('fatal error in job download loop - cannot continue')\n # do not set graceful stop if pilot has not finished sending the final job update\n # i.e. wait until SERVER_UPDATE is DONE_FINAL\n check_for_final_server_update(args.update_server)\n args.graceful_stop.set()\n break\n\n if not res:\n getjob_failures += 1\n if getjob_failures >= args.getjob_failures:\n logger.warning('did not get a job -- max number of job request failures reached: %d', getjob_failures)\n args.graceful_stop.set()\n break\n\n delay = get_job_retrieval_delay(args.harvester)\n if not args.harvester:\n logger.warning('did not get a job -- sleep %d s and repeat', delay)\n for _ in range(delay):\n if args.graceful_stop.is_set():\n break\n time.sleep(1)\n else:\n # it seems the PanDA server returns StatusCode as an int, but the aCT returns it as a string\n # note: StatusCode keyword is not available in job definition files from Harvester (not needed)\n if 'StatusCode' in res and res['StatusCode'] != '0' and res['StatusCode'] != 0:\n getjob_failures += 1\n if getjob_failures >= args.getjob_failures:\n logger.warning('did not get a job -- max number of job request failures reached: %d',\n getjob_failures)\n args.graceful_stop.set()\n break\n\n logger.warning('did not get a job -- sleep 60s and repeat -- status: %s', res['StatusCode'])\n for i in range(60):\n if args.graceful_stop.is_set():\n break\n time.sleep(1)\n else:\n # create the job object out of the raw dispatcher job dictionary\n try:\n job = create_job(res, args.queue)\n except PilotException as error:\n raise error\n #else:\n # verify the job status on the server\n #try:\n # job_status, job_attempt_nr, job_status_code = get_job_status_from_server(job.jobid, args.url, args.port)\n # if job_status == \"running\":\n # pilot_error_diag = \"job %s is already running elsewhere - aborting\" % job.jobid\n # logger.warning(pilot_error_diag)\n # raise JobAlreadyRunning(pilot_error_diag)\n #except Exception as error:\n # logger.warning(\"%s\", error)\n # write time stamps to pilot timing file\n # note: PILOT_POST_GETJOB corresponds to START_TIME in Pilot 1\n add_to_pilot_timing(job.jobid, PILOT_PRE_GETJOB, time_pre_getjob, args)\n add_to_pilot_timing(job.jobid, PILOT_POST_GETJOB, time.time(), args)\n\n # add the job definition to the jobs queue and increase the job counter,\n # and wait until the job has finished\n put_in_queue(job, queues.jobs)\n\n jobnumber += 1\n while 
not args.graceful_stop.is_set():\n if has_job_completed(queues, args):\n # purge queue(s) that retains job object\n purge_queue(queues.finished_data_in)\n\n args.job_aborted.clear()\n args.abort_job.clear()\n logger.info('ready for new job')\n\n # re-establish logging\n logging.info('pilot has finished for previous job - re-establishing logging')\n logging.handlers = []\n logging.shutdown()\n establish_logging(debug=args.debug, nopilotlog=args.nopilotlog)\n pilot_version_banner()\n getjob_requests = 0\n add_to_pilot_timing('1', PILOT_MULTIJOB_START_TIME, time.time(), args)\n break\n time.sleep(0.5)\n\n # proceed to set the job_aborted flag?\n if threads_aborted():\n logger.debug('will proceed to set job_aborted')\n args.job_aborted.set()\n else:\n logger.debug('will not set job_aborted yet')\n\n logger.debug('[job] retrieve thread has finished')", "def test_get_cloud_job(self):\n pass", "def main():\n\n from sys import argv\n opts = getopts(argv)\n\n if \"-t\" in opts:\n task_name = opts[\"-t\"]\n else:\n print(\"Error: must specify -t\")\n return\n\n task_map = {\n \"coin_list\": import_coin_list,\n \"historical\": import_historical_data,\n \"current\": import_current_data,\n \"twitter\": import_twitter_data,\n \"analysis\": analysis_tasks,\n \"cc_stats\": cc_stats_task,\n \"db_stats\": db_stats,\n \"stocktwits\": import_stocktwits\n }\n\n if task_name not in task_map:\n print(\"Error: task {} should be one of {}\".format(task_name, list(task_map.keys())))\n return\n\n tasks.init()\n\n task_map[task_name]()", "def fetch_all(get_data, get_filename, format_data, get_next, updater=None):\n fetch = True\n complete_data = []\n index = 0\n while fetch:\n data = get_data(updater)\n write_json_file(data, get_filename(index))\n\n complete_data += format_data(data)\n index += 1\n try:\n updater = get_next(data)\n except Exception:\n fetch = False\n return complete_data", "def start(self):\n def range_reader(stream, size, url):\n page_num = stream.getvalue()\n # Map readers should return a list of values, so page_num is\n # explicitly converted to an integer and then wrapped into a\n # list. 
By doing this each mapper instance will get exactly\n # one page number\n # If we don't do this, the mapper API just reads the numbers\n # character by character and we end up fetching the same 10\n # pages: digits 0, 9 all through since each character of a number\n # should be one of these 10 digits.\n return [int(page_num)]\n\n job = Job()\n\n inputs = [('raw://%d' % (i)) for i in range(1, self.num_pages)]\n\n job.run(input=inputs, map=mapper, reduce=reducer,\n map_reader=range_reader, params=Params(\n query=self.query,\n trained_vectorizer=self.vectorizer\n ),\n required_modules=[\n ('vectorizer', os.path.join(datasettings.PROJECT_ROOT,\n 'analyzer',\n 'vectorizer.py'),),\n ('models', os.path.join(datasettings.PROJECT_ROOT,\n 'webui', 'fatninja',\n 'models.py'),),\n ])\n\n self.feature_vector, self.row_num_to_tweet_id_map = \\\n self.vectorizer.build_feature_matrix(job)\n\n self.classify()", "def run(self, datasets, **kwargs):\n return None", "def request_data(url_stats, url_rules, save_path_stats, save_path_rules):\n try:\n os.mkdir(os.path.dirname(save_path_stats))\n except:\n pass\n try:\n os.mkdir(os.path.dirname(save_path_rules))\n except:\n pass\n \n stats_req = urllib.request.Request(url_stats, headers={'User-Agent': 'ArkPlanner'})\n rules_req = urllib.request.Request(url_rules, headers={'User-Agent': 'ArkPlanner'})\n\n\n with urllib.request.urlopen(stats_req) as url:\n material_probs = json.loads(url.read().decode())\n with open(save_path_stats, 'w') as outfile:\n json.dump(material_probs, outfile)\n\n with urllib.request.urlopen(rules_req) as url:\n convertion_rules = json.loads(url.read().decode())\n with open(save_path_rules, 'w') as outfile:\n json.dump(convertion_rules, outfile)\n\n return material_probs, convertion_rules", "def fetch_basic_dataset(args=None):\n _args = dict(request_interval=0.3)\n if args is not None:\n _args.update(args)\n\n artist_slugs_with_cent_df = _fetch_all_artist_slugs()\n # TODO: For debug only\n artist_slugs_with_cent_df.to_hdf(os.path.expanduser(\n '~/tmp/wikiart/wikiart_artist_slugs.hdf5'), 'df', mode='w')\n\n artist_slugs = artist_slugs_with_cent_df.index.values\n print 'Fetching paintings urls'\n all_links = []\n for artist_idx, slug in enumerate(artist_slugs):\n sys.stdout.write('\\rArtist {:04d}/{}'.format(artist_idx, len(artist_slugs)))\n sys.stdout.flush()\n\n relative_page_urls = \\\n _get_paintings_relative_urls_by_artist_broot(artist_idx, len(artist_slugs),\n slug, _args['request_interval'])\n all_links.extend(relative_page_urls)\n time.sleep(_args['request_interval'])\n\n # TODO: for debug only. 
REMOVE\n if artist_idx % 200 == 0:\n print 'Saving df snapshot'\n tmp_df = _slugs_to_df(all_links, artist_slugs_with_cent_df)\n tmp_df.to_hdf(os.path.expanduser('~/tmp/wikiart/wikiart_basic_info_{}_artists.hdf5'\n .format(artist_idx)), 'df', mode='w')\n print ''\n # remove duplicates\n all_links = list(set(all_links))\n\n # Turn URLs into image ids and get other basic info.\n df = _slugs_to_df(all_links, artist_slugs_with_cent_df)\n return df", "def route_local_retrieval_instruction(self, instruction, key, args):\n if instruction not in self.store.retrieval_instructions:\n raise ClusterQueryException(\"unknown retrieval instruction: {}\".format(instruction))\n\n if self.status == Cluster.Status.INITIALIZING:\n raise ClusterQueryException('cannot query an initializing node')\n elif self.status == Cluster.Status.STREAMING:\n # forward the query to the streaming node\n return self._streaming_node.execute_retrieval_instruction(instruction, key, args)\n else:\n return getattr(self.store, instruction)(key, *args)", "def fetch_data(state):\n t0 = time.time()\n # Collect fetch methods for all dashboard modules\n fetch_methods = {module.id: getattr(module, 'fetch_data') for module in modules}\n # Create a thread pool: one separate thread for each dashboard module\n with concurrent.futures.ThreadPoolExecutor(max_workers=len(fetch_methods)) as executor:\n # Prepare the thread tasks\n tasks = {}\n for key, fetch_method in fetch_methods.items():\n task = executor.submit(fetch_method, state)\n tasks[task] = key\n # Run the tasks and collect results as they arrive\n results = {}\n for task in concurrent.futures.as_completed(tasks):\n key = tasks[task]\n results[key] = task.result()\n # Return results once all tasks have been completed\n t1 = time.time()\n timer.text = '(Execution time: %s seconds)' % round(t1 - t0, 4)\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a WAV from bytes.
def from_bytes(wav_bytes: bytes, dtype: Optional[np.dtype] = None) -> Tuple[np.ndarray, int]:
    return wav_io_python_bindings.read_wav_impl(io.BytesIO(wav_bytes), dtype)
[ "def read_wave(path):\n with contextlib.closing(wave.open(path, \"rb\")) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000, 48000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def read_audio(path):\n wave_reader = wave.open(path, \"rb\")\n wave_params = wave_reader.getparams()\n return wave_reader, wave_params", "def readWav():\n\n # Read the sound wave from the input.\n sound_wave = wave.open(sys.argv[1], \"r\")\n\n # Get parameters of the sound wave.\n nframes = sound_wave.getnframes()\n framerate = sound_wave.getframerate()\n params = sound_wave.getparams()\n duration = nframes / float(framerate)\n\n print(\"frame rate: %d \" % (framerate,))\n print(\"nframes: %d\" % (nframes,))\n print(\"duration: %f seconds\" % (duration,))\n print(scipy.array(sound_wave))\n\n return (sound_wave, nframes, framerate, duration, params)", "def convert_wav(\n self,\n wav_bytes: bytes,\n sample_rate: typing.Optional[int] = None,\n sample_width: typing.Optional[int] = None,\n channels: typing.Optional[int] = None,\n ) -> bytes:\n if sample_rate is None:\n sample_rate = self.sample_rate\n\n if sample_width is None:\n sample_width = self.sample_width\n\n if channels is None:\n channels = self.channels\n\n return subprocess.run(\n [\n \"sox\",\n \"-t\",\n \"wav\",\n \"-\",\n \"-r\",\n str(sample_rate),\n \"-e\",\n \"signed-integer\",\n \"-b\",\n str(sample_width * 8),\n \"-c\",\n str(channels),\n \"-t\",\n \"raw\",\n \"-\",\n ],\n check=True,\n stdout=subprocess.PIPE,\n input=wav_bytes,\n ).stdout", "def read_wav(wavfile):\n assert os.path.isfile(wavfile), \\\n 'ERROR: wivefile file %s does not exist' % wavfile\n\n x, fs, enc = audiolab.wavread(wavfile)\n if len(x.shape) >= 2:\n x = x[:, 0] # Make mono\n\n assert fs == 44100, \\\n \"ERROR: File %s is not sampled at 44100 Hz\" % wavfile\n\n return x, fs", "def wiimote_Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)", "def ReadWaveFile(filename):\n f = wave.open(filename, 'rb')\n waveInfo = dict()\n waveInfo[\"nchannels\"] = f.getnchannels()\n waveInfo[\"framerate\"] = f.getframerate()\n waveInfo[\"nframes\"] = f.getnframes()\n waveInfo[\"samplewidth\"] = f.getsampwidth()\n str_data = f.readframes(waveInfo[\"nframes\"])\n\n # np.short is 16-bit length\n wave_data = np.fromstring(str_data, dtype=np.short) \n wave_data = wave_data.astype(np.float16)\n wave_data /= 32768.0\n wave_data.shape = -1, waveInfo[\"nchannels\"]\n return waveInfo, wave_data", "def wav_data(mono_wav):\n the_data = fft.data_from_file(mono_wav)\n return the_data", "def read_wav(filename):\n s,fs = load(filename) # scipy reads int\n s = np.array(s)/float(max(abs(s)))\n s = add_wgn(s) # Add jitter for numerical stability\n return fs,s", "def readWv(self,FileName): # Verified 2020.0115\n try:\n file = open(FileName, \"rb\")\n data = file.read()\n file.close()\n except:\n print(\"File open error (\"+ FileName+\")!\")\n return\n\n binaryStart = 0\n tags = \"\"\n Counter = 0\n ConverterSize = 20\n while (binaryStart == 0) & (Counter < len(data)):\n tags += data[Counter:Counter+ConverterSize].decode(\"ASCII\",\"ignore\")\n Counter += ConverterSize\n res = re.search(\"WAVEFORM.{0,20}:.{0,3}#\",tags)\n if res is not None:\n binaryStart = res.span()[1]\n\n if (Counter > len(data)) & (binaryStart == 0):\n print(\"Required tags not found, potentially incompatible file 
format!\")\n return\n\n res = re.search(\"SAMPLES[ ]*:[ ]*(?P<NumberOfSamples>[0-9]*)\",tags)\n self.NumberOfSamples = int(res.group(\"NumberOfSamples\"))\n res = re.search(\"CLOCK[ ]*:[ ]*(?P<SamplingRate>[0-9]*)\",tags)\n self.fSamplingRate = float(res.group(\"SamplingRate\"))\n data = list(struct.unpack(\"h\"*self.NumberOfSamples*2, data[binaryStart:-1])) #MMM data: IQ arry\n data = list(map(lambda x: x/32767.0, data)) #MMM consumes a lot of time\n self.__iqiq2complex__(data)", "def Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)", "def read_wav_file(path):\n \n # Parse the input file's extension\n extension = os.path.splitext(path)[1]\n \n # Load the WAV file and set the output parameters\n try:\n if extension.lower() == '.wav':\n [fs, x] = wavfile.read(path)\n num_samples = len(x)\n try: \n num_channels = x.shape[1]\n except:\n num_channels = 1\n data = [] \n for channel in range(num_channels):\n if num_channels == 1:\n data.append(x.astype(np.float32)/float(2**15))\n else:\n data.append(x[0:,channel].astype(np.float32)/float(2**15))\n else:\n raise IOError(\"unknown file type\")\n return (-1,-1,-1)\n except: \n IOError(\"file not found\")\n return (-1,-1,-1)\n \n # Return the output data (tuple)\n return (data, fs, num_channels, num_samples)", "def maybe_convert_wav(\n self,\n wav_bytes: bytes,\n sample_rate: typing.Optional[int] = None,\n sample_width: typing.Optional[int] = None,\n channels: typing.Optional[int] = None,\n ) -> bytes:\n if sample_rate is None:\n sample_rate = self.sample_rate\n\n if sample_width is None:\n sample_width = self.sample_width\n\n if channels is None:\n channels = self.channels\n\n with io.BytesIO(wav_bytes) as wav_io:\n with wave.open(wav_io, \"rb\") as wav_file:\n if (\n (wav_file.getframerate() != sample_rate)\n or (wav_file.getsampwidth() != sample_width)\n or (wav_file.getnchannels() != channels)\n ):\n # Return converted wav\n return self.convert_wav(\n wav_bytes,\n sample_rate=sample_rate,\n sample_width=sample_width,\n channels=channels,\n )\n\n # Return original audio\n return wav_file.readframes(wav_file.getnframes())", "def read(self):\n raw_bytes = self.wav_in.readframes(self.nframes)\n struct_fmt = \"%u%s\" % (len(raw_bytes) / self.sampwidth, self.struct_fmt_code)\n data = wave.struct.unpack(struct_fmt, raw_bytes)\n if self.signed:\n data = [i / float(self.range/2) for i in data]\n else:\n data = [(i - float(range/2)) / float(range/2) for i in data]\n\n channels = []\n for i in range(self.nchannels):\n channels.append([data[j] for j in range(0, len(data), self.nchannels) ])\n\n return channels", "def get_wav_data():\n # Get requested word and query db for audio url\n word = request.args.get(\"word\")\n audio_url = services.query.get_audio_url(word)\n\n # Fetch audio file from MW API\n # and send to client\n data = requests.get(audio_url).content\n return send_file(BytesIO(data), mimetype='audio/wav')", "def read_wav(file):\n f=wave.open(file,\"r\")\n raw_data=f.readframes(f.getnframes())\n array=np.fromstring(raw_data,np.short)\n array.shape=-1,2\n array=array.T.astype(float)[0]\n samplerate=f.getframerate()\n f.close()\n return feature_normalize(array),samplerate", "def waveread(audio_name, separateChannels = True):\n # open wave file read binary\n if (audio_name.split(\".\")[-1] == \"wav\") | (audio_name.split(\".\")[-1] == \"WAV\"):\n wr = wave.open(audio_name, 'rb')\n else:\n print('wrong file format! 
only WAVE files are supported')\n return\n\n sampling_rate = wr.getframerate()\n chunk = wr.getnframes() # length of auidiofile\n bin_array = wr.readframes(chunk) # binary wave information\n channel_nr = wr.getnchannels()\n quantization = wr.getsampwidth()\n\n if channel_nr == 1 and quantization == 1: # 8 bit mono\n # binary to array with numbers\n data = np.array(struct.unpack('BB' * chunk, bin_array))\n # has values from 0 to 255, which have to be changed to [-1:1]\n wave_array = data-np.mean(data)\n wave_array = wave_array / np.max(abs(wave_array))\n\n left_channel = None\n right_channel = None\n mono_channel = wave_array\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 1 and quantization == 2: # 16 bit mono\n # binary to array with numbers\n data = np.array(struct.unpack('h' * int((len(bin_array) / 2)), bin_array))\n wave_array = data / np.max(abs(data))\n\n left_channel = None\n right_channel = None\n mono_channel = wave_array\n\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 2 and quantization == 1: # 8 bit stereo\n # binary to array with numbers\n data = np.array(struct.unpack('BB' * chunk, bin_array))\n # has values from 0 to 255, which have to be changed to [-1:1]\n wave_array = data - np.mean(data)\n\n # Define channels and avoid clipping\n left_channel = wave_array[::2] / np.max(abs(wave_array))\n right_channel = wave_array[1::2] / np.max(abs(wave_array))\n mono_channel = left_channel + right_channel\n mono_channel = mono_channel / np.max(abs(mono_channel))\n wave_array = wave_array / np.max(abs(wave_array))\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 2 and quantization == 2: # 16 bit stereo\n # stero handling\n data = np.array(struct.unpack('hh' * chunk, bin_array))\n\n left_channel = data[::2] / np.max(abs(data))\n right_channel = data[1::2] / np.max(abs(data))\n mono_channel = left_channel + right_channel\n mono_channel = mono_channel / np.max(abs(mono_channel))\n wave_array = data / np.max(abs(data))\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n else:\n print(\"not supported channel number or quantization\")\n\n return", "def pcm_channels(wave_file):\n global integer_data\n stream = wave.open(wave_file,\"rb\")\n\n num_channels = stream.getnchannels()\n sample_rate = stream.getframerate()\n sample_width = stream.getsampwidth()\n num_frames = stream.getnframes()\n\n raw_data = stream.readframes( num_frames ) # Returns byte data\n stream.close()\n\n total_samples = num_frames * num_channels\n\n if sample_width == 1: \n fmt = \"%iB\" % total_samples # read unsigned chars\n elif sample_width == 2:\n fmt = \"%ih\" % total_samples # read signed 2 byte shorts\n else:\n raise ValueError(\"Only supports 8 and 16 bit audio formats.\")\n\n integer_data = struct.unpack(fmt, raw_data)\n del raw_data # Keep memory tidy (who knows how big it might be)", "def read(filename):\n if not quiet: print 'loading', filename\n\n file = wave.open(filename, \"r\")\n file_frames = file.readframes(file.getnframes())\n\n snd = Sound()\n\n # check for mono files\n if file.getnchannels() == 1:\n file_frames = 
audioop.tostereo(file_frames, file.getsampwidth(), 0.5, 0.5)\n snd.params = file.getparams()\n snd.params = (2, snd.params[1], snd.params[2], snd.params[3], snd.params[4], snd.params[5])\n else:\n snd.params = file.getparams()\n\n snd.data = file_frames\n\n return snd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defining the placeholders for (batch) observations, actions and advantage values.
def define_placeholders(self):
    sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
    if self.discrete:
        sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
    else:
        sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
    sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
    return sy_ob_no, sy_ac_na, sy_adv_n
[ "def _create_placeholders(self):\n self._observations_ph = tf.placeholder(\n tf.float32,\n shape=[None, self._observation_dim],\n name='observations')\n\n self._next_observations_ph = tf.placeholder(\n tf.float32,\n shape=[None, self._observation_dim],\n name='next_observations')\n\n self._actions_ph = tf.placeholder(\n tf.float32, shape=[None, self._action_dim], name='actions')\n\n self._next_actions_ph = tf.placeholder(\n tf.float32, shape=[None, self._action_dim], name='next_actions')\n\n self._rewards_ph = tf.placeholder(\n tf.float32, shape=[None], name='rewards')\n\n self._dones_ph = tf.placeholder(\n tf.float32, shape=[None], name='dones')", "def add_placeholders(self):\n #load in the training examples, and their labels\n self.X = tf.placeholder(tf.float32, [self.Nbatch,self.maxlen,self.Nfeatures],name='X')\n self.y = tf.placeholder(tf.int32,[self.Nbatch,self.Noutputs],name='y')", "def _create_placeholders(self):\n\n _placeholder = tf.sparse.placeholder if self.sparse_input else tf.placeholder\n input_data = _placeholder('float', name='x-input')\n input_data_corr = _placeholder('float', name='x-corr-input')\n input_label = tf.placeholder('float', name='x-input-label')\n\n return input_data, input_data_corr, input_label", "def _build_summary_ops(self):\n with tf.variable_scope(self.scope, reuse=self.reuse):\n with tf.name_scope('summaries'):\n self.error_summary = tf.summary.scalar('td_error', tf.reduce_mean(tf.abs(self.td_error)))\n self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')\n self.reward = tf.placeholder(tf.float32, [], 'summary_reward')\n smiles_summary = tf.summary.text('SMILES', self.smiles)\n reward_summary = tf.summary.scalar('reward', self.reward)\n self.episode_summary = tf.summary.merge([smiles_summary, reward_summary])", "def init_parameters(self):\r\n self.guessed_fishes_dict = {}\r\n self.train_index = 0", "def _prepare_multi_resolution_inputs(self):\n self.y_gt_bold = tf.placeholder(tf.float32, shape=(ctx.PPD_M1, 3))\n self.y_gt_mid = tf.placeholder(tf.float32, shape=(ctx.PPD_M2, 3))\n self.y_gt_fine = self.y_gt", "def get_placeholders(self):\n\n self.output_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size, config.max_phr_len, config.output_features),\n name='output_placeholder') \n\n\n self.phoneme_labels = tf.placeholder(tf.int32, shape=(config.batch_size, config.max_phr_len),\n name='phoneme_placeholder')\n self.phone_onehot_labels = tf.one_hot(indices=tf.cast(self.phoneme_labels, tf.int32), depth = config.num_phos)\n \n self.f0_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size, config.max_phr_len, 1),\n name='f0_placeholder')\n\n\n self.singer_labels = tf.placeholder(tf.float32, shape=(config.batch_size),name='singer_placeholder')\n self.singer_onehot_labels = tf.one_hot(indices=tf.cast(self.singer_labels, tf.int32), depth = config.num_singers)\n\n\n self.is_train = tf.placeholder(tf.bool, name=\"is_train\")", "def add_placeholders(self):\n\n self.perm_placeholder = tf.placeholder(tf.float32, (None, self.config.nx,self.config.nx,1), name = \"perm\")\n self.pressure_placeholder = tf.placeholder(tf.float32, (None, self.config.nx,self.config.nx,1), name = \"pressure\")\n self.U_face_operator_placeholder = tf.sparse_placeholder(tf.float32, (None, self.config.nfaces,self.config.nx*self.config.nx), name = \"U_face_operator\")\n self.U_face_fixed_placeholder = tf.placeholder(tf.float32, (None, self.config.nfaces), name = \"U_face_fixed\")\n self.U_face_placeholder = tf.placeholder(tf.float32, (None, self.config.nfaces), 
name = \"U_face\")\n\n self.is_training = tf.placeholder(tf.bool)", "def add_placeholders(self):\n \n with tf.variable_scope(\"Inputs\"):\n \n # Inputs\n self.X_input = tf.placeholder(\"float\", [None, self.dim_input], name='X_input')\n self.Pij_mask = tf.placeholder(\"float\", [None, None], name='Pij_mask')\n \n # Hyperparams\n self.ALPHA = tf.placeholder(tf.float32, name='ALPHA')\n self.LAMBDA = tf.placeholder(tf.float32, name='LAMDBDA')\n self.SIGMA = tf.placeholder(tf.float32, name='SIGMA')\n self.DROPOUT_FRACTION = tf.placeholder(tf.float32, name='DROPOUT_FRACTION')", "def __init__(self, observations, observations_are_data=True):\n ## Subclasses can override this method to hardcode observations if desired\n if observations_are_data:\n self.obs_summaries = self.summaries(observations)\n else:\n self.obs_summaries = observations", "def _setup_prediction_op(self):", "def create_placeholders(self):\n self.inputs_pl_list.append(\n tf.placeholder(tf.float32,\n shape=[None, None, self.input_size *\n self.num_stack * self.splice],\n name='input'))\n self.labels_pl_list.append(\n tf.SparseTensor(tf.placeholder(tf.int64, name='indices'),\n tf.placeholder(tf.int32, name='values'),\n tf.placeholder(tf.int64, name='shape')))\n self.inputs_seq_len_pl_list.append(\n tf.placeholder(tf.int32, shape=[None], name='inputs_seq_len'))\n self.keep_prob_pl_list.append(\n tf.placeholder(tf.float32, name='keep_prob'))", "def attention_variables(attention_size,hidden_size):\n # Trainable parameters\n # init s0 vector, [B, A]\n batch_init_state = tf.get_variable('s',\n dtype=tf.float32,\n shape=[batch_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Ua = tf.get_variable('Ua',\n dtype=tf.float32,\n shape=[hidden_size, attention_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Wa = tf.get_variable('Wa',\n dtype=tf.float32,\n shape=[hidden_size, attention_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n va = tf.get_variable('va',\n dtype=tf.float32,\n shape=[attention_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Ur = tf.get_variable('Ur',\n dtype=tf.float32,\n shape=[hidden_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Cr = tf.get_variable('Cr',\n dtype=tf.float32,\n shape=[attention_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Uz = tf.get_variable('Uz',\n dtype=tf.float32,\n shape=[hidden_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n Cz = tf.get_variable('Cz',\n dtype=tf.float32,\n shape=[attention_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n U = tf.get_variable('U',\n dtype=tf.float32,\n shape=[hidden_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n C = tf.get_variable('C',\n dtype=tf.float32,\n shape=[attention_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n return batch_init_state, Ua, Wa, va, Ur, Cr, Uz, Cz, U, C", "def _init_critic_update(self):\n Q_target = tf.stop_gradient(self._get_Q_target())\n\n assert Q_target.shape.as_list() == [None, 1]\n\n Q_values = self._Q_values = tuple(\n Q([self._observations_ph, self._actions_ph])\n for Q in self._Qs)\n\n Q_losses = self._Q_losses = tuple(\n tf.losses.mean_squared_error(\n labels=Q_target, predictions=Q_value) # , weights=0.5)\n for Q_value in Q_values)\n\n Q_loss=tf.reduce_sum(Q_losses)\n\n Q_optimizer =tf.train.AdamOptimizer(\n learning_rate=self._Q_lr,\n 
name='{}_{}_optimizer'.format('Q',1)\n )\n\n self._Q_optimizers=Q_optimizer\n\n train_var=self._Qs[0].trainable_variables+self._Qs[1].trainable_variables\n Q_training_ops =tf.contrib.layers.optimize_loss(\n Q_loss,\n self.global_step,\n learning_rate=self._Q_lr,\n optimizer=Q_optimizer,\n variables=train_var,\n increment_global_step=False,\n summaries=((\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\n ) if self._tf_summaries else ()))\n\n\n self._training_ops.update({'Q': tf.group(Q_training_ops)})", "def learn(self, reward, observation):", "def check_and_update_param_values_helper( self, inputs, values, trans, messages, context=None, prefix=\"\" ):\n context = ExpressionContext( values, context )\n for input in inputs.itervalues():\n # No value, insert the default\n if input.name not in values:\n messages.append( prefix + input.label )\n values[input.name] = input.get_initial_value( trans, context )\n # Value, visit recursively as usual\n else:\n if isinstance( input, Repeat ):\n for i, d in enumerate( values[ input.name ] ):\n rep_prefix = prefix + \"%s %d > \" % ( input.title, i + 1 )\n self.check_and_update_param_values_helper( input.inputs, d, trans, messages, context, rep_prefix )\n elif isinstance( input, Conditional ):\n group_values = values[ input.name ]\n current = group_values[\"__current_case__\"]\n self.check_and_update_param_values_helper( input.cases[current].inputs, group_values, trans, messages, context, prefix )\n else:\n # Regular tool parameter, no recursion needed\n pass", "def __init__(self, params):\n if params:\n raise ValueError(f\"Observation parameters not supported; passed {params}\")\n # The observation should contain a 1-D tensor in `self.tensor` and a\n # dictionary of views onto the tensor, which may be of any shape.\n # Here the observation is indexed `(cell state, row, column)`.\n shape = (1 + _NUM_PLAYERS, _NUM_ROWS, _NUM_COLS)\n self.tensor = np.zeros(np.prod(shape), np.float32)\n self.dict = {\"observation\": np.reshape(self.tensor, shape)}", "def generative_parameters(self):\n pass", "def sample_goal_params(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sampling an action from the policy distribution. For a discrete action space, we sample from the categorical distribution. For a continuous action space, we sample from a normal distribution and construct the action from the mean and log_std (taking an exp) parameters.
def sample_action(self, policy_parameters):
    if self.discrete:
        sy_logits_na = policy_parameters
        sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, num_samples=1), axis=1)
    else:
        sy_mean, sy_logstd = policy_parameters
        z = tf.random_normal(shape=tf.shape(sy_mean))
        sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * z
    return sy_sampled_ac
[ "def sample_action(self,policy):\n action_indx = bernoulli.rvs(policy[2])\n return policy[action_indx]", "def sample_actions(available_actions, policy):\n\n def sample(probs, name):\n dist = Categorical(\n probs=probs,\n allow_nan_stats=False,\n name=name) # XXX Categorical/logits/Log:0: NaN\n return dist.sample()\n\n fn_pi, arg_pis = policy\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n fn_samples = sample(fn_pi, name=\"Categorical-fn\")\n\n arg_samples = dict()\n for i, (arg_type, arg_pi) in enumerate(arg_pis.items()):\n arg_samples[arg_type] = sample(arg_pi, name=\"Categorical-arg-%d\"%i)\n\n return (fn_samples, arg_samples, fn_pi)", "def act(self, state):\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, action_mean = self.policy_net(state)\n #print('act:model action ',action_mean)\n \n # builds distribution\n # if action is out of env action range, resample it\n high = self.env.action_space.high\n low = self.env.action_space.low \n while True:\n action_distribution = MultivariateNormal(\n action_mean,torch.abs(self.stds))\n action = action_distribution.sample() # random action sampling\n if ((action.cpu().numpy() <= high) and \n (action.cpu().numpy() >= low)):\n break\n \n # log probability of chosen action\n log_prob = action_distribution.log_prob(action).reshape(1)\n return action, log_prob", "def sample_action(self, state):\n # YOUR CODE HERE\n action = np.random.choice([0, 1], p=self.get_probs([state, state], [0, 1]))\n return action", "def pick_action(self, policy, state, epsilon_exploration=None):\n if self.action_types == \"DISCRETE\":\n if random.random() <= epsilon_exploration:\n action = random.randint(0, self.action_size - 1)\n return action\n\n state = torch.from_numpy(state).float().unsqueeze(0)\n actor_output = policy.forward(state) # FIX IT\n # means = torch.clamp(actor_output[:,0:self.environment.get_action_size()], min=-1000, max=1000)\n # stds = torch.clamp(actor_output[:, self.environment.get_action_size():], min=-5,max=5)\n\n PI = 3.1415026\n means = torch.clamp(actor_output[:,0:self.environment.get_action_size()], min=-PI, max=PI)\n stds = torch.clamp(actor_output[:, self.environment.get_action_size():], min=-PI/20,max=PI/20)\n \n # stds = log_stds.exp()\n actor_output = torch.cat((means, stds),1)\n \n if self.action_choice_output_columns is not None:\n actor_output = actor_output[:, self.action_choice_output_columns]\n action_distribution = create_actor_distribution(self.action_types, actor_output, self.action_size)\n action = action_distribution.sample().cpu()\n\n if self.action_types == \"CONTINUOUS\": \n action += torch.Tensor(self.noise.sample())\n else: \n action = action.item()\n return action", "def mc_control_importance_sampling(env, num_episodes, behavior_policy, discount_factor=1.0):\n \n # The final action-value function.\n # A dictionary that maps state -> action values\n returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\n returns_count = defaultdict(lambda: np.zeros(env.action_space.n))\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n C = defaultdict(lambda: np.zeros(env.action_space.n))\n # Our greedily policy we want to learn\n target_policy = create_greedy_policy(Q)\n for i_episode in range(1, num_episodes + 1):\n # Print out which episode we're on, useful for debugging.\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n\n # Generate an episode.\n # An 
episode is an array of (state, action, reward) tuples.\n episode = []\n state = env.reset()\n for t in range(100):\n # Sample an action from our policy\n probs = behavior_policy(state)\n action = np.random.choice(np.arange(len(probs)), p=probs)\n next_state, reward, done, _ = env.step(action)\n episode.append((state, action, reward))\n if done:\n break\n state = next_state\n\n for state, action, _ in episode:\n # Get the index j of first occurence of the state-action pair.\n first_occurence_idx = next(i for i,(state_i, action_i, _) in enumerate(episode) if state_i == state and action_i == action)\n # Calculate the return starting from step j --\n G = sum([(discount_factor**i)*reward_i for i,(_, _, reward_i) in enumerate(episode[first_occurence_idx:])])\n # and get the importance weight as defined in the lecture.\n W = np.prod([target_policy(state_i)[action_i]/behavior_policy(state_i)[action_i] for (state_i, action_i, _) in episode[first_occurence_idx:]])\n # We increase counter and total sum of that pair --\n returns_sum[state][action] += (W * G)\n returns_count[state][action] += 1\n # and update the Q function. Since our target policy acts greedily on Q, we implicitly also update the policy.\n Q[state][action] = returns_sum[state][action]/returns_count[state][action]\n return Q, target_policy", "def sample_actions(self, agent_outputs):\n mu, var, q = agent_outputs\n mu_np, sigma_np = mu.detach().cpu().numpy(), torch.sqrt(var).detach().cpu().numpy()\n actions = np.random.normal(mu_np, sigma_np)\n actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n return actions", "def get_random_action(self, *args):\n action = self.env.action_space.sample()\n return action", "def sample_action(strategy, available_actions: typing.Union[None, typing.List]=None):\n actions = [a for a in strategy]\n if available_actions is not None:\n actions = [a for a in actions if a in available_actions]\n probs = np.array([strategy[a] for a in actions])\n\n assert np.sum(probs) > 0.0, print(\"Oops: {}, {}\".format(probs, actions))\n\n probs = probs / np.sum(probs)\n\n idx = np.random.choice(list(range(len(actions))), p=probs)\n return actions[idx]", "def random_action(self):\n self.action_new = self._env.action_space.sample()\n return self.action_new", "def act(self, state, eps):\n dice = random.random()\n if (dice < eps):\n # Random act\n action = np.random.choice(self.action_space)\n else:\n # Follow policy\n #print(state.shape)\n if self.conv_mode:\n action = self.policy_net.predict(state[np.newaxis, :,:,:])\n else:\n action = self.policy_net.predict(state[np.newaxis, :])\n action = np.argmax(action, axis=1)[0]\n #print(\"The action: {}\".format(action))\n \n return action", "def sample_actions(self, observations, session):\n return session.run(self.policy.sample, feed_dict={\n self.observations_placeholder: observations\n }).tolist()", "def sample_state_action(self):\n\n # Not sure if state should be sampled first and then action rather than both simultaneously.\n # Doing this for now for simplicity.\n r = np.random.randint(len(self.model.keys()))\n (state, action) = list(self.model.keys())[r]\n return (state, action)", "def sample_episode(self, state, policies):\n\n if state.is_terminal():\n return np.array(state.returns(), dtype=np.float32)\n elif state.is_chance_node():\n outcomes = []\n probs = []\n for action, prob in state.chance_outcomes():\n outcomes.append(action)\n probs.append(prob)\n outcome = np.random.choice(outcomes, p=probs)\n state.apply_action(outcome)\n return 
self.sample_episode(state, policies)\n else:\n player = state.current_player()\n state_policy = _policy_dict_at_state(policies[player], state)\n actions = []\n probs = []\n for action in state_policy:\n actions.append(action)\n probs.append(state_policy[action])\n action = np.random.choice(actions, p=probs)\n state.apply_action(action)\n return self.sample_episode(state, policies)", "def sample_policy(self, obs):\n mu = self.logits(obs)\n pi = torch.distributions.Normal(loc=mu, scale=torch.exp(self.log_std))\n\n return pi", "def sample_episode(env, policy):\n states = []\n actions = []\n rewards = []\n dones = []\n \n # YOUR CODE HERE\n state = env.reset()\n states.append(state)\n while True:\n action = policy.sample_action(state)\n next_state, reward, done, _ = env.step(action)\n states.append(next_state) if not done else None\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n if done:\n break\n state = next_state\n return states, actions, rewards, dones", "def get_action(self, state):\n state = torch.FloatTensor(state).unsqueeze(0).to(self.device)\n # calculate Gaussian distribusion of (mean, log_std)\n mean, log_std = self.forward(state)\n std = log_std.exp()\n\n normal = Normal(0, 1)\n # sample actions\n z = normal.sample().to(self.device)\n action = torch.tanh(mean + std * z)\n\n action = action.cpu() # .detach().cpu().numpy()\n return action[0]", "def sample_actions(self, agent_outputs):\n logits, q = agent_outputs\n probs = F.softmax(logits, dim=-1)\n return torch.multinomial(probs, 1)[:, 0].cpu().data.numpy()", "def _choose_action_from_policy(self, state):\n random_choice = np.random.uniform() < self.epsilon\n if random_choice:\n action = np.random.choice(4, 1, p=[0.25, 0.25, 0.25, 0.25])[0]\n else:\n actions_q_values = self.Q[state]\n action = max(actions_q_values, key=actions_q_values.get)\n return action" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Building the computation graph for the policy gradient algorithm.
def build_computation_graph(self):
    # Defining placeholders for obs/states, actions and advantage values.
    self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
    # Computing the logits.
    self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
    # Sampling an action according to our policy.
    self.sy_sampled_ac = self.sample_action(self.policy_parameters)
    # Computing log_probs of chosen actions.
    self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
    # Defining the loss function.
    # http://rail.eecs.berkeley.edu/deeprlcourse/static/slides/lec-5.pdf
    loss = tf.reduce_mean(self.sy_logprob_n * self.sy_adv_n)
    self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

    if self.nn_baseline:
        # Create the value network.
        self.baseline_prediction = tf.squeeze(build_mlp(
            self.sy_ob_no, 1, "nn_baseline", n_layers=self.n_layers, size=self.size))
        # Placeholder for target values which will be used in the loss function for value network.
        self.sy_target_n = tf.placeholder(dtype=tf.float32, shape=[None], name='sy_target_n')
        # Define the loss function for value network. Basically MSE loss.
        baseline_loss = tf.reduce_mean((self.baseline_prediction - self.sy_target_n) ** 2)
        self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
[ "def _setup_connected_gradients(self):\n # Index relevant variables based on self.goal_indices\n meta_obs0 = self.crop_to_goal(self.policy[0].obs_ph)\n meta_obs1 = self.crop_to_goal(self.policy[0].obs1_ph)\n worker_obs0 = self.crop_to_goal(self.policy[-1].obs_ph)\n worker_obs1 = self.crop_to_goal(self.policy[-1].obs1_ph)\n\n if self.relative_goals:\n # Relative goal formulation as per HIRO.\n goal = meta_obs0 + self.policy[0].actor_tf - meta_obs1\n else:\n # Goal is the direct output from the meta policy in this case.\n goal = self.policy[0].actor_tf\n\n # Concatenate the output from the manager with the worker policy.\n obs_shape = self.policy[-1].ob_space.shape[0]\n obs = tf.concat([self.policy[-1].obs_ph[:, :obs_shape], goal], axis=-1)\n\n # Create the worker policy with inputs directly from the manager.\n with tf.compat.v1.variable_scope(\"level_1/model\"):\n worker_with_meta_obs = self.policy[-1].make_critic(\n obs, self.policy[-1].action_ph, reuse=True, scope=\"qf_0\")\n\n # Create a tensorflow operation that mimics the reward function that is\n # used to provide feedback to the worker.\n if self.intrinsic_reward_type.startswith(\"scaled\"):\n # Scale the observations/goals by the action space of the upper-\n # level policy if requested.\n ac_space = self.policy[0].ac_space\n scale = 0.5 * (ac_space.high - ac_space.low)\n worker_obs0 /= scale\n goal /= scale\n worker_obs1 /= scale\n\n if self.relative_goals:\n # Implement relative goals if requested.\n goal += worker_obs0\n\n if self.intrinsic_reward_type.endswith(\"exp_negative_distance\"):\n reward_fn = tf.reduce_mean(tf.exp(-tf.reduce_sum(\n tf.square(worker_obs0 + goal - worker_obs1), axis=1)))\n elif self.intrinsic_reward_type.endswith(\"negative_distance\"):\n reward_fn = -tf.compat.v1.losses.mean_squared_error(\n worker_obs0 + goal, worker_obs1)\n else:\n raise ValueError(\"Unknown intrinsic reward type: {}\".format(\n self.intrinsic_reward_type))\n\n # Scale by the worker reward scale.\n reward_fn *= self.intrinsic_reward_scale\n\n # Compute the worker loss with respect to the meta policy actions.\n self.cg_loss = - tf.reduce_mean(worker_with_meta_obs) - reward_fn\n\n # Create the optimizer object.\n optimizer = tf.compat.v1.train.AdamOptimizer(self.policy[0].actor_lr)\n self.cg_optimizer = optimizer.minimize(\n self.policy[0].actor_loss + self.cg_weights * self.cg_loss,\n var_list=get_trainable_vars(\"level_0/model/pi/\"),\n )", "def build_gradient_graph(accessor, user_args_requiring_grad, user_args_not_requiring_grad, output_names):\n\n model = accessor.model\n\n quant_params = qat_utils.get_quant_params(model)\n\n # Collect names of parameters that need gradients computed\n all_args_requiring_gradient = []\n # Move all trainable and non trainable initializers to graph inputs.\n # This allows training to pass in the parameters from outside the graph\n # so as to share the parameters across multiple sessions.\n graph_inputs = model.graph.input\n initializers = []\n for initializer in model.graph.initializer:\n if not initializer.name.startswith(\"onnx::\") and initializer.name not in quant_params:\n # Move only those initializers as inputs that are not local\n # to the onnx model. i.e. 
initializers that are model parameters.\n # These are tpically those initializers without any onnx:: prefixed\n # to their names.\n graph_inputs.append(\n onnx.helper.make_tensor_value_info(initializer.name, initializer.data_type, initializer.dims)\n )\n if initializer.name not in user_args_not_requiring_grad:\n all_args_requiring_gradient.append(initializer.name)\n else:\n # All other initializers (including any quantization parameter) stay where they were.\n initializers.append(initializer)\n\n # Update the initializers in the graph\n del model.graph.initializer[:]\n model.graph.initializer.extend(initializers)\n\n # At this point, we have the eval model\n accessor.eval_model = copy.deepcopy(model)\n\n # Any graph input that requires gradient, should have been already added to\n # args_requiring_grad. So, add these arguments to set of arguments\n # whose gradient should be built.\n for argument_name in user_args_requiring_grad:\n all_args_requiring_gradient.append(argument_name)\n\n # Run graph transformations to optimize the graph\n model.CopyFrom(\n onnx.load_from_string(get_optimized_model(model.SerializeToString(), set(all_args_requiring_gradient)))\n )\n\n # Assumption is that the first graph output is the loss output\n if isinstance(output_names, str):\n output_names = [output_names]\n builder = GradientGraphBuilder(\n model.SerializeToString(),\n set(output_names),\n set(all_args_requiring_gradient),\n output_names[0],\n )\n builder.build()\n gradient_model = onnx.load_from_string(builder.get_model())\n\n # Reorder gradient outputs for the gradient model based on the all_args_requiring_gradient order\n _reorder_outputs(gradient_model, output_names, all_args_requiring_gradient)\n\n # copy the gradient model into the user's model\n model.CopyFrom(gradient_model)\n\n return all_args_requiring_gradient", "def build_graph(self):\r\n self._create_placeholders()\r\n self._create_network()\r\n self._create_loss()\r\n self._create_optimizer()\r\n self._create_summaries()\r\n self._show_current_model()", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n if 'input' in self.elmo_positions:\n if self.elmo_utils:\n self._elmo_embed_input_with_cache()\n\n if self.algo == 'MEMNET':\n # self._run_memory_network(gated=self.gated_memnet)\n raise NotImplementedError(\"self.algo {} is not implemented\".format(self.algo))\n else:\n # encode layers\n if self.dial_encode == 'CONCAT':\n self._encode()\n self._word_match_for_concated()\n elif self.dial_encode == 'HIERARCHY':\n # for now, we still use the concated encoding at the same time\n self._encode()\n # hierarchy encode\n self._hierarchy_encode()\n self._word_match_for_concated()\n else:\n raise NotImplementedError(\"dial_encode {} is not implemented\".format(self.dial_encode))\n\n if 'SEQTAG' in self.decode_goal:\n self._decode_seq_tags()\n else:\n self._decode_multiclass()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if self.train:\n with tf.control_dependencies(update_ops):\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def test_policy_gradient(self):\n model = VanillaPolicyGradient(self.hparams.env)\n self.trainer.fit(model)", "def calculate_gradients(self):\n\n print('Calculating gradients')\n fdtd = self.sim.fdtd\n self.gradient_fields = GradientFields(forward_fields = 
self.forward_fields, adjoint_fields = self.adjoint_fields)\n self.sim.fdtd.switchtolayout()\n if self.use_deps:\n self.geometry.d_eps_on_cad(self.sim)\n fom_partial_derivs_vs_wl = GradientFields.spatial_gradient_integral_on_cad(self.sim, 'forward_fields', 'adjoint_fields', self.adjoint_fields.scaling_factor)\n self.gradients = self.fom.fom_gradient_wavelength_integral(fom_partial_derivs_vs_wl.transpose(), self.forward_fields.wl)\n else:\n if hasattr(self.geometry,'calculate_gradients_on_cad'):\n fom_partial_derivs_vs_wl = self.geometry.calculate_gradients_on_cad(self.sim, 'forward_fields', 'adjoint_fields', self.adjoint_fields.scaling_factor)\n self.gradients = self.fom.fom_gradient_wavelength_integral(fom_partial_derivs_vs_wl, self.forward_fields.wl)\n else:\n fom_partial_derivs_vs_wl = self.geometry.calculate_gradients(self.gradient_fields)\n self.gradients = self.fom.fom_gradient_wavelength_integral(fom_partial_derivs_vs_wl, self.forward_fields.wl)\n return self.gradients", "def dPolicy_dH(self, policy_trials):\n\n DLogger.logger().debug(\"adding gradients for each cell-action pair...\")\n grads = []\n for a in policy_trials:\n grads.append(tf.gradients(self.policy[0, a, 0], self.state_in_pret)[0][0])\n grads = tf.convert_to_tensor(grads)\n grads = tf.transpose(grads, [2, 0, 1])\n DLogger.logger().debug(\"finished adding gradients.\")\n return grads", "def _build_graph(self):\n self.op_size = len(self._ops)\n op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]\n self._add_connections(op_node_connections)\n for i in range(self.op_size):\n self._uses[i].update(self._ops[i].input_arg_names())\n self._defs[i].update(self._ops[i].output_arg_names())", "def get_gradients(self, root):\n def down_fun(node, parent_vals):\n # Sum up all parent vals\n parent_vals = [pv for pv in parent_vals if pv is not None]\n if len(parent_vals) > 1:\n summed = tf.add_n(parent_vals, name=node.name + \"_add\")\n else:\n summed = parent_vals[0]\n self._true_gradients[node] = summed\n if node.is_op:\n # Compute for inputs\n if isinstance(node, BaseSum):\n kwargs = dict(dropconnect_keep_prob=self._dropconnect_keep_prob)\n else:\n kwargs = dict()\n with tf.name_scope(node.name):\n if self._log:\n return node._compute_log_gradient(\n summed, *[self._value.values[i.node]\n if i else None\n for i in node.inputs], **kwargs)\n else:\n return node._compute_log_gradient(\n summed, *[self._value.values[i.node]\n if i else None\n for i in node.inputs], **kwargs)\n\n # Generate values if not yet generated\n if not self._value.values:\n self._value.get_value(root)\n\n with tf.name_scope(\"Gradient\"):\n # Compute the tensor to feed to the root node\n graph_input = tf.ones_like(self._value.values[root])\n\n # Traverse the graph computing gradients\n self._true_gradients = {}\n compute_graph_up_down(root, down_fun=down_fun, graph_input=graph_input)", "def monte_carlo_policy_gradient(self):\n reward_sum = 0\n init_state = self.start_state # the initialized state\n while init_state != self.end_state:\n action = self.select_action() # choose an action according to the preference\n next_state, reward = self.find_next_state(init_state, action) # find the next state\n self.action_list.append(action) # add new action to the list of action\n self.reward_list.append(reward) # add new reward to the list of reward\n reward_sum += reward\n init_state = next_state # set initialized state equal to the next state\n\n returns = np.zeros(len(self.reward_list)) # set default return is zeros\n returns[-1] = self.reward_list[-1]\n 
for idx in range(2, len(self.reward_list) + 1):\n returns[-idx] = self.reward_list[-idx] + self.gamma*returns[-idx + 1] # update the returns of this episode\n\n gamma_power = 1\n for idx in range(len(returns)):\n row = self.action_list[idx]\n pi_func = self.compute_policy() # get the pi distribution\n gradient = self.feature_vector[:, row] - np.dot(pi_func, self.feature_vector)\n self.init_theta += self.alpha * gamma_power * returns[idx] * gradient # update the theta\n gamma_power *= self.gamma\n return reward_sum", "def __build_train_op(self) -> None:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\n # train only custom variables that are trainable\n var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.resnet.custom_scope.name)\n accum_vars = [tf.get_variable('{}/grad_accum'.format(var.op.name), var.shape, tf.float32, tf.zeros_initializer,\n trainable=False) for var in var_list]\n self.zero_gradients_op = [var.assign(tf.zeros_like(var)) for var in accum_vars]\n gradients = optimizer.compute_gradients(loss=self.resnet.loss, var_list=var_list,\n aggregation_method=tf.AggregationMethod.ADD_N)\n\n # insert UPDATE_OPS if needed\n self.accumulate_gradients_op = [accum_vars[i].assign_add(g[0]) for i, g in enumerate(gradients)]\n\n grad_scaling = 1. / self.virtual_batch_size_factor\n self.apply_gradients_op = optimizer.apply_gradients([\n (tf.multiply(accum_vars[i], grad_scaling), # accumulated, averaged gradients\n g[1]) # variable to update\n for i, g in enumerate(gradients)])", "def build_policy_network_op(self, scope = \"policy_network\"):\n #######################################################\n ######### YOUR CODE HERE - 8-12 lines. ############\n\n # logprob: log π_θ(a_t|s_t)\n if self.discrete:\n action_logits = build_mlp(self.observation_placeholder,\n self.action_dim,\n scope,\n self.config.n_layers,\n self.config.layer_size,\n self.config.activation)\n self.sampled_action = tf.squeeze(tf.multinomial(action_logits, 1))\n self.logprob = - tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.action_placeholder,\n logits=action_logits)\n else:\n action_means = build_mlp(self.observation_placeholder,\n self.action_dim,\n scope,\n self.config.n_layers,\n self.config.layer_size,\n self.config.activation)\n log_std = tf.get_variable(\"log_std\", shape=(1, self.action_dim))\n self.sampled_action = tf.random_normal((1,), mean=action_means, stddev=log_std)\n self.logprob = tfp.distributions.MultivariateNormalDiag(action_means, log_std)\n\n #######################################################\n ######### END YOUR CODE. 
############", "def _build_policy_loss(self, i):\n pol_dist = self._policy_network.dist\n old_pol_dist = self._old_policy_network.dist\n\n # Initialize dual params\n self._param_eta = 15.\n self._param_v = np.random.rand(\n self._env_spec.observation_space.flat_dim * 2 + 4)\n\n with tf.name_scope('bellman_error'):\n delta_v = tf.boolean_mask(i.reward_var,\n i.valid_var) + tf.tensordot(\n i.feat_diff, i.param_v, 1)\n\n with tf.name_scope('policy_loss'):\n ll = pol_dist.log_prob(i.action_var)\n ll = tf.boolean_mask(ll, i.valid_var)\n loss = -tf.reduce_mean(\n ll * tf.exp(delta_v / i.param_eta -\n tf.reduce_max(delta_v / i.param_eta)))\n\n reg_params = self.policy.get_regularizable_vars()\n loss += self._l2_reg_loss * tf.reduce_sum(\n [tf.reduce_mean(tf.square(param))\n for param in reg_params]) / len(reg_params)\n\n with tf.name_scope('kl'):\n kl = old_pol_dist.kl_divergence(pol_dist)\n pol_mean_kl = tf.reduce_mean(kl)\n\n with tf.name_scope('dual'):\n dual_loss = i.param_eta * self._epsilon + (\n i.param_eta * tf.math.log(\n tf.reduce_mean(\n tf.exp(delta_v / i.param_eta -\n tf.reduce_max(delta_v / i.param_eta)))) +\n i.param_eta * tf.reduce_max(delta_v / i.param_eta))\n\n dual_loss += self._l2_reg_dual * (tf.square(i.param_eta) +\n tf.square(1 / i.param_eta))\n\n dual_grad = tf.gradients(dual_loss, [i.param_eta, i.param_v])\n\n self._f_dual = compile_function(\n flatten_inputs(self._dual_opt_inputs),\n dual_loss)\n\n self._f_dual_grad = compile_function(\n flatten_inputs(self._dual_opt_inputs),\n dual_grad)\n\n self._f_policy_kl = compile_function(\n flatten_inputs(self._policy_opt_inputs),\n pol_mean_kl)\n\n return loss", "def apply_gradient(params: torch.Tensor, grads: torch.Tensor, lr: float) -> torch.Tensor:\n params_prime = params + lr * grads\n # print(\"Gradient\", torch.mean(grads))\n return params_prime", "def accumulate_gradients(self):\n for k in range(self.last_layer, -1, -1):\n self.g[k] = self.g[k] + np.matmul(self.d[k].T, self.a[k])", "def apply_from_grad(self):\n with torch.no_grad():\n ra = self.running_avg_step\n bias_correction = 1 - (self.big_gamma ** self.step_count)\n eps = self.epsilon\n # Calculate gap from grad\n for pg in self.optimizer.param_groups:\n max_lr = pg[GapAwareBase.MAX_LR_NAME]\n if max_lr <= 0:\n continue\n weight_decay = pg['weight_decay']\n for p in pg['params']:\n # calculate C coefficient per-element\n avg_steps_needed = max_lr * \\\n (((ra[id(p)] / bias_correction) ** 0.5) + eps)\n\n # calculate the gap per-element\n penalty = 1 + (pg['lr'] * p.grad.abs() / avg_steps_needed)\n\n # Apply penalty to gradient\n p.grad /= penalty\n # Apply penalty to weight decay (as it will be part of the gradient)\n # HACK: we know that sgd does\n # d_p += p*wd\n # and we want:\n # d_p += p*wd/penalty\n # so we solve:\n # x + z + p*wd = x + (p*wd / penalty)\n # giving:\n # z = p*wd ((1/penalty) - 1) = ((1 - penalty) / penalty)\n # so we do\n # d_p += z\n # z = p * weight_decay * ((1 - penalty) / penalty)\n p.grad += p.mul(weight_decay * ((1 - penalty) / penalty))", "def build_graph(config):\n # placeholders\n pholders = build_placeholders(config)\n\n waymark_construction_results = tf_get_waymark_data(config, pholders)\n wmark0_data = waymark_construction_results.waymark0_data\n wmark_data = waymark_construction_results.waymark_data\n\n with tf.variable_scope(\"tre_model\"):\n\n idxs = config.initial_waymark_indices\n max_num_ratios = idxs[-1]\n\n energy_obj = build_energies(config=config,\n bridge_idxs=pholders.bridge_idxs,\n max_num_ratios=max_num_ratios,\n 
head_multiplier=pholders.head_multiplier\n )\n\n neg_energies = energy_obj.neg_energy(wmark_data, is_train=True, is_wmark_input=True)\n\n # build train loss & optimisation step\n tre_train_loss = build_train_loss(config, neg_energies, pholders.loss_weights)\n tre_optim_op = build_optimisers(tre_train_loss, pholders, config)\n\n # build validation operations\n val_neg_energies = energy_obj.neg_energy(wmark_data, is_train=False, is_wmark_input=True)\n loss_obj, val_loss, loss_terms, nwj_loss_op = build_val_loss(config, val_neg_energies)\n\n neg_energies_of_data = energy_obj.neg_energy(wmark0_data, is_train=False, is_wmark_input=False) # (n_batch, n_ratios)\n av_neg_energies_of_data = tf.reduce_mean(neg_energies_of_data, axis=0) # (n_ratios, )\n\n if \"2d\" in config.dataset_name or \"1d\" in config.dataset_name:\n noise_logprob = waymark_construction_results.noise_dist.log_prob(wmark0_data)\n bridges_and_noise_neg_e_of_data = tf.concat([neg_energies_of_data, tf.expand_dims(noise_logprob, axis=1)], axis=1)\n\n spec_norms = []\n if hasattr(energy_obj, \"model\"):\n for layer in energy_obj.model.layers:\n if hasattr(layer, \"spectral_norm\"):\n spec_norms.append(layer.spectral_norm)\n\n average_metric_ops = [\n loss_obj.acc,\n loss_obj.class1_acc,\n loss_obj.class2_acc,\n loss_obj.dawid_statistic_numerator,\n loss_obj.dawid_statistic_denominator,\n val_loss,\n nwj_loss_op,\n av_neg_energies_of_data\n ]\n\n graph = AttrDict(locals())\n graph.update(pholders)\n return graph # dict whose values can be accessed as attributes i.e. val = dict.key", "def _apply_gradient(self, loss, var_list, grad_dict):\n raise NotImplementedError(\"Please use subclass with specific algorithms, like boomdiff.optimize.GD\")", "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\n\n costs = []\n\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w-(learning_rate*dw)\n b = b-(learning_rate*db)\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print(\"\\nCost after iteration %i: %f\" % (i, cost))\n print(\"w: \", w.shape)\n print(\"X: \", X.shape)\n print(\"b: \", b)\n print(\"Y: \", Y.shape)\n\n params = {\"w\": w,\n \"b\": b}\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return params, grads, costs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes advantages by (possibly) subtracting a baseline from the estimated Q values. If not nn_baseline, we just return q_n.
def compute_advantage(self, ob_no, q_n):
    if self.nn_baseline:
        b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no})
        # Match the statistics.
        b_n = np.mean(q_n) + np.std(q_n) * b_n
        adv_n = q_n - b_n
    else:
        adv_n = q_n.copy()
    return adv_n
[ "def estimate_advantage(self, obs, q_values):\n\n # Estimate the advantage when nn_baseline is True,\n # by querying the neural network that you're using to learn the baseline\n if self.nn_baseline:\n baselines_normalized = self.actor.run_baseline_prediction(obs)\n # ensure that the baseline and q_values have the same dimensionality\n # to prevent silent broadcasting errors\n assert baselines_normalized.ndim == q_values.ndim\n # baseline was trained with standardized q_values, so ensure that the predictions\n # have the same mean and standard deviation as the current batch of q_values\n baselines = baselines_normalized * np.std(q_values) + np.mean(q_values)\n # DONE: compute advantage estimates using q_values and baselines\n advantages = q_values - baselines\n\n # Else, just set the advantage to [Q]\n else:\n advantages = q_values.copy()\n\n # Normalize the resulting advantages\n if self.standardize_advantages:\n # DONE: standardize the advantages to have a mean of zero\n # and a standard deviation of one\n # HINT: there is a `normalize` function in `infrastructure.utils`\n advantages = utils.normalize(advantages, advantages.mean(), advantages.std())\n\n return advantages", "def calculate_advantage(self, returns, observations):\n if self.config.use_baseline:\n # override the behavior of advantage by subtracting baseline\n advantages = self.baseline_network.calculate_advantage(returns, observations)\n else:\n advantages = returns\n\n if self.config.normalize_advantage:\n advantages = self.normalize_advantage(advantages)\n\n return advantages", "def q_expected(self):\n total = 0.0\n for a in self.pre:\n if self.atom_state[a] == ATOM_ENABLED:\n total += self.usecount * self.Q[a]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n for a in self.eff:\n if self.atom_state[a] == ATOM_DISABLED:\n total += self.usecount * self.Q[a.negate()]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n return (total/self.usecount) / (len(self.pre)+len(self.eff))", "def q_learning_baseline(args):\n # make environement and define observation format with Wrappers\n env = gym.make(args.env_name)\n env = RGBImgPartialObsWrapper(env) # Get pixel observations\n env = ImgObsWrapper(env) # Get rid of the 'mission' field\n env.seed(0) # sets the seed\n obs = env.reset() # This now produces an RGB tensor only\n\n # Vector for storing intermediate results\n reward_history, avg_reward = [], []\n\n params = QParams(gamma=0.999, eps_start=1.0, eps_dec=5e-10, eps_end=0.05, n_actions=7, lr=1e-4)\n agent = QAgent(params)\n\n print(\"Episode num_steps avg_Reward\")\n for i in range(args.num_games):\n num_step , accum_reward = 0, 0\n done = False\n obs = env.reset()\n while not done:\n\n action = agent.choose_action(obs)\n obs_, reward, done, info = env.step(action)\n agent.learn(obs, action, reward, obs_, done)\n obs = deepcopy(obs_)\n accum_reward += reward\n num_step += 1\n\n reward_history.append(accum_reward/num_step)\n avg_reward.append(np.mean(reward_history[-AVG_FREQ:]))\n\n if (i % PRINT_FREQ == 0): # Print training every PRINT_FREQ episodes\n print('%i \\t %s \\t %.3f' % (i, env.step_count, avg_reward[-1]))\n env.close()\n return avg_reward", "def penalty(self):\n diff = self.Q[-1] - self.qB\n return 1/(2*self.sigma_sq)*assemble(inner(diff,diff)*dx)", "def normalize_advantage(self, advantages):\n #######################################################\n ######### YOUR CODE HERE - 1-5 lines. 
############\n\n advantages = (advantages - np.mean(advantages)) / np.std(advantages)\n\n #######################################################\n ######### END YOUR CODE. ############\n return advantages", "def gap(baseline, heuristic):\n return 100 * (heuristic - baseline) / baseline", "def get_lineup_efficiency(league: League, lineup: List[Player]) -> float:\n max_score = get_best_lineup(league, lineup)\n real_score = np.sum(\n [player.points for player in lineup if player.slot_position not in (\"BE\", \"IR\")]\n )\n return real_score / max_score", "def test_performance_difference_lemma_discounted(M):\n\n p = random_dist(M.S, M.A)\n q = random_dist(M.S, M.A)\n\n dp = M.d(p) # Roll-in with p\n Aq = M.Advantage(q) # Roll-out with q\n # Accumulate advantages of p over q.\n z = 1/(1-M.γ) * sum(dp[s] * p[s,:] @ Aq[s,:] for s in range(M.S))\n\n assert np.allclose(M.J(p) - M.J(q), z)\n print('[pd-lemma]', ok)\n\n\n # The PD lemma is just potential-based shaping.\n # See `test_potential_based_shaping` to read about potential-based shaping.\n #\n # Let `ϕ(s) = Vq(s)` where `Vq(s)` is the value function of some policy `q`.\n # The shaped reward is\n #\n # R'(s,a,s') = R(s,a,s') + γ Vq(s') - Vq(s)\n #\n # Now take the expectation over s',\n #\n # E_{s'}[ R'(s,a,s') ]\n # = E_{s'}[ R(s,a,s') + γ Vq(s') - Vq(s) ]\n # = E_{s'}[ R(s,a,s') + γ Vq(s') ] - Vq(s)\n # = Qq(s,a) - Vq(s).\n # = Aq(s, a)\n #\n # We see that the shaped reward function is the advantage of policy `q`.\n\n ϕ = M.V(q)\n M1 = M.copy()\n M1.apply_potential_based_shaping(ϕ)\n\n assert_equal(M1.J(p), M.J(p) - M.J(q), verbose=True)\n\n # Sanity check: q should have no advantive over itself.\n assert abs(M1.J(q)) < 1e-10", "def get_stats(baseline, proposed):\n global total_segment_durationsb, total_segment_durationsp, \\\n max_segment_durationb, max_segment_durationp, \\\n min_segment_durationb, min_segment_durationp\n for entry in baseline:\n for segment in entry[1]:\n this_seg = segment[1] - segment[0]\n if this_seg > max_segment_durationb:\n max_segment_durationb = this_seg\n if this_seg < min_segment_durationb or min_segment_durationb == 0.0:\n min_segment_durationb = this_seg\n total_segment_durationsb += this_seg\n for entry in proposed:\n for segment in entry[1]:\n this_seg = segment[1] - segment[0]\n if this_seg > max_segment_durationp:\n max_segment_durationp = this_seg\n if this_seg < min_segment_durationp or min_segment_durationp == 0.0:\n min_segment_durationp = this_seg\n total_segment_durationsp += this_seg\n return (total_segment_durationsb, max_segment_durationb, min_segment_durationb), \\\n (total_segment_durationsp, max_segment_durationp, min_segment_durationp)", "def _calculate_baseline(self):\n if self.data_counter % self.window_step == 0 and len(self.data_queue) == self.window_length:\n measurement = util.to_list(self.measurement_func(list(self.data_queue)))\n if self.baseline is None:\n self.baseline = [[feature] for feature in measurement]\n else:\n for baseline_feature, feature in zip(self.baseline, measurement):\n baseline_feature.append(feature)\n if self.data_counter >= self.baseline_length:\n self.baseline = [abs(sum(feature)) / len(feature) for feature in self.baseline]\n self._handle_datapoint = self._calculate_measurement", "def siblings_baseline_score(top_node):\n base = 100000\n number_of_paragraphs = 0\n score_of_paragraphs = 0\n nodes_to_check = parser.get_elements_by_tag(top_node, tag='p')\n for node in nodes_to_check:\n node_text = parser.get_text(node)\n word_stats = 
StopWords().get_stop_word_count(node_text)\n high_link_density = is_high_link_density(node)\n if word_stats.get_stop_word_count() > 2 and not high_link_density:\n number_of_paragraphs += 1\n score_of_paragraphs += word_stats.get_stop_word_count()\n if number_of_paragraphs > 0:\n base = score_of_paragraphs / number_of_paragraphs\n return base", "def get_qscore_percentage(self, target_qscore=30, read_num=-1):\n \n q_upper_cols = [x for x in self.data.keys() if x[0]=='q' and int(x[1:]) > target_qscore-1 ]\n q_upper_df = self.idf[q_upper_cols] \n\n if read_num==-1:\n # return %>=qn for entire data set.\n q_upper_sum = q_upper_df.values.sum()\n q_total_sum = self.idf.values.sum()\n\n else:\n # segment Qscores by read_num. Let IndexError be raised for too-high read_num.\n # read_tiers example: [151,157,308] \n\n cycle_start = 0 if read_num == 0 else self.read_tiers[read_num - 1] + 1\n cycle_end = self.read_tiers[read_num]\n \n tiles = self.flowcell_layout['tilecount']\n lanes = self.flowcell_layout['lanecount']\n surfaces = self.flowcell_layout['surfacecount']\n\n i_start = 0 if read_num==0 else cycle_start * tiles * lanes * surfaces\n i_end = cycle_end * tiles * lanes * surfaces\n \n q_upper_sum = q_upper_df[i_start:i_end].values.sum()\n q_total_sum = self.idf[i_start:i_end].values.sum() \n \n # Return a percentage (like in Illumina SAV)\n if q_total_sum:\n return 100 * float(q_upper_sum) / float(q_total_sum)\n else:\n return 0", "def test_prioritized_dqn_paper_count():\n prioritized_dqn_entries = rldb.find_all({\n 'source-title': 'Prioritized Experience Replay',\n })\n\n assert len(prioritized_dqn_entries) == (\n 0\n + 57 # Proportional Prioritized DDQN\n + 57 # Rank Prioritized DQN\n + 57 # Rank Prioritized DDQN\n )", "def power_analysis_does_yhat_reduce_effect_of_income(non_image_dataset, pval_thresh, n_iterates, dv):\n\n all_results = []\n assert dv in ['koos_pain_subscore', 'womac_pain_subscore']\n knee_pain_scores = non_image_dataset.processed_dataframes['all_knee_pain_scores']\n clinical_ratings = non_image_dataset.processed_dataframes['kxr_sq_bu']\n df_to_use = get_baseline_scores(get_combined_dataframe(non_image_dataset, clinical_ratings)) \n print(\"Length of baseline data\")\n print(len(df_to_use))\n iv = 'binarized_income_at_least_50k'\n\n pain_subscores = ['koos_pain_subscore', 'womac_pain_subscore']\n assert dv in pain_subscores\n for k in pain_subscores:\n df_to_use[k] = (df_to_use[k] - df_to_use[k].mean()) / df_to_use[k].std()\n\n clinical_controls = '+'.join(['C(%s)' % a for a in non_image_dataset.clinical_xray_semiquantitative_cols])\n\n for noise_param in [3, 5, 8, 10]:\n for disparity_param in [.2]:\n print(\"Noise param: %2.3f; disparity param: %2.3f\" % (noise_param, disparity_param))\n # as disparity param increases in magnitude, yhat gets more correlated with SES. \n # as noise_param increases in magnitude, yhat gets less correlated with y and SES. \n\n if dv == 'womac_pain_subscore':\n # higher scores indicate worse pain on the womac\n # so if you have a higher SES we want you to have lower predicted Yhat. 
\n disparity_param = -disparity_param\n df_to_use['yhat'] = df_to_use[dv] + df_to_use[iv] * disparity_param + noise_param * np.random.randn(len(df_to_use),)\n df_to_use = df_to_use.dropna(subset=[dv, iv])\n print(df_to_use[[iv, 'yhat']].groupby(iv).agg(['mean', 'std']))\n for subset_size in list(range(250, 2000, 250)):\n for _ in range(n_iterates):\n people = set(random.sample(non_image_dataset.all_ids, subset_size))\n people_idxs = df_to_use['id'].map(lambda x:x in people).values\n\n model_without_yhat = sm.OLS.from_formula('%s ~ %s + %s' % (dv, iv, clinical_controls), df_to_use.loc[people_idxs]).fit()\n model_with_yhat = sm.OLS.from_formula('%s ~ %s + %s + yhat' % (dv, iv, clinical_controls), df_to_use.loc[people_idxs]).fit()\n\n\n change_in_iv_coef = model_with_yhat.params[iv] - model_without_yhat.params[iv]\n # Note: \n ## To get estimate of noise variance for a model, the following 3 are all the same.\n # this is sigma_hat SQUARED, not sigma_hat.\n # 1. np.sum(model_without_yhat.resid ** 2) / model_without_yhat.df_resid)\n # 2. model_without_yhat.mse_resid\n # 3. model_without_yhat.scale\n\n squared_error_on_change = (model_with_yhat.bse[iv] ** 2 - \n model_without_yhat.bse[iv] ** 2 * model_with_yhat.scale / model_without_yhat.scale)\n assert squared_error_on_change > 0\n error_on_change = np.sqrt(squared_error_on_change)\n zscore = change_in_iv_coef/error_on_change\n if (model_with_yhat.params[iv] > 0) != (model_without_yhat.params[iv] > 0): \n # if the sign of the coefficient changes that is weird. It should just get smaller. \n print(\"Warning: coefficient changed sign from %2.3f to %2.3f\" % (model_without_yhat.params[iv], model_with_yhat.params[iv]))\n results = {'r2_with_yhat':model_with_yhat.rsquared, \n 'r2_without_yhat':model_without_yhat.rsquared, \n 'beta_with_yhat':model_with_yhat.params[iv], \n 'beta_without_yhat':model_without_yhat.params[iv], \n 'change_in_IV_coef':change_in_iv_coef, \n 'error_on_change':error_on_change, \n 'zscore':zscore, \n 'p_change':2*(1 - norm.cdf(abs(zscore))), # two-tailed p-value. \n 'yhat_iv_corr':pearsonr(df_to_use['yhat'], df_to_use[iv])[0], \n 'yhat_dv_corr':pearsonr(df_to_use['yhat'], df_to_use[dv])[0], \n 'subset_size':subset_size}\n all_results.append(results)\n \n # now make plot. 
\n all_results = pd.DataFrame(all_results)\n for iv_corr in sorted(list(set(all_results['yhat_iv_corr'])), key=lambda x:abs(x)):\n for dv_corr in sorted(list(set(all_results['yhat_dv_corr'])), key=lambda x:abs(x)):\n idxs = ((all_results['yhat_iv_corr'] == iv_corr) & \n (all_results['yhat_dv_corr'] == dv_corr))\n if idxs.sum() == 0:\n continue\n x = []\n y = []\n for subset_size in sorted(list(set(all_results['subset_size']))):\n x.append(subset_size)\n results = all_results.loc[idxs & (all_results['subset_size'] == subset_size), \n 'p_change'] < pval_thresh\n assert len(results) == n_iterates\n y.append(results.mean())\n \n plt.plot(x, y, label='IV (income/educ): r=%2.3f, DV (pain): r=%2.3f' % (abs(iv_corr), \n abs(dv_corr)))\n \n plt.legend(bbox_to_anchor=(1.1, 1.05))\n plt.xlabel(\"Test set size\")\n plt.ylabel(\"Fraction of simulations\\nwhich are significant @ $p = %2.2f$\" % pval_thresh)\n plt.title(\"Significance test is for change in income/educ coef\")\n plt.savefig('power_analysis_does_yhat_reduce_effect_of_ses.png')", "def eval(self):\n\n print()\n multi_question = len(self._analogy_questions) > 1\n global_guessed = 0\n global_total = 0\n for i in range(len(self._analogy_questions)):\n questions = self._analogy_questions[i]\n # How many questions we get right at precision@1.\n correct = {i: 0 for i in xrange(ANALOGY_COUNT)}\n skips_map = {i: 0 for i in xrange(ANALOGY_COUNT + 1)}\n\n try:\n total = questions.shape[0]\n except AttributeError as e:\n raise AttributeError(\"Need to read analogy questions.\")\n\n start = 0\n while start < total:\n limit = start + 2500\n sub = questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n prio = 0\n skips = 0\n for j in xrange(ANALOGY_COUNT):\n if idx[question, j] == sub[question, 3]:\n # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].\n correct[prio] += 1\n break\n elif idx[question, j] in sub[question, :3]:\n # We need to skip words already in the question.\n skips += 1\n continue\n else:\n # The correct label is not the precision@1\n prio += 1\n skips_map[skips] += 1\n accuracy_list = ' '.join('%5.1f%%' % (correct[i] * 100.0 / total) for i in xrange(ANALOGY_COUNT))\n total_skips = sum(skips_map.values())\n skips_list = ' '.join('%5.1f%%' % (skips_map[i] * 100.0 / total_skips) for i in xrange(1, ANALOGY_COUNT + 1))\n guessed = sum(correct.values())\n suffix = ' for #%d' % (i + 1) if multi_question else ''\n output(\"Eval%s %4d/%d accuracy = %5.1f%% [%s] skips [%s]\" % (\n suffix, guessed, total, guessed * 100.0 / total, accuracy_list, skips_list\n ))\n global_guessed += guessed\n global_total += total\n\n if multi_question:\n output(\"Eval global %4d/%d accuracy = %4.1f%%\" % (\n global_guessed, global_total, global_guessed * 100.0 / global_total\n ))", "def overlap_score(q1, q2):\n score = 0\n return score", "def bl_subtract(w_in: np.ndarray, a_baseline: float, w_out: np.ndarray) -> None:\n w_out[:] = np.nan\n\n if np.isnan(w_in).any() or np.isnan(a_baseline):\n return\n\n w_out[:] = w_in[:] - a_baseline", "def qval_lr(self,\n episode: int) -> float:\n return self.qval_learning_rate.learning_rate(episode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updating parameters of the policy and value function (if nn_baseline).
def update_parameters(self, ob_no, ac_na, q_n, adv_n, epoch):
    if self.nn_baseline:
        # Computing targets for value function.
        target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + self.eps)
        # Updating the value function.
        self.sess.run(self.baseline_update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n})
    # Updating the policy function.
    self.sess.run([self.update_op], feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
    # Save the model after updating. No check for the improvement :)
    self.saver.save(self.sess, os.path.join(self.model_dir, "model"), global_step=epoch)
[ "def update_policy(self):\n # get indexes wrt policy to retrieve appropriate reward and\n # transition matrix\n indexes = [np.arange(self.n_states_), self.policy_]\n\n reward = self.reward_[indexes]\n transition_proba = self.transition_proba_[indexes]\n\n # Compute updated value\n foo = np.eye(self.n_states_) - self.gamma_ * transition_proba\n self.value_ = np.matmul(np.linalg.inv(foo), reward)\n\n # Compute new policy\n self.policy_ = self.compute_optimal_policy()", "def update(self, policy):", "def _update_policy_and_distribution(self):\n self._policy = self.get_softmax_policy()\n self._distribution = distribution_std.DistributionPolicy(\n self._game, self._policy)", "def policyUpdate(self,rewards):\n #t0 = time.time()\n V = dict.fromkeys(self.states)\n for str_state in self.states:\n V[str_state] = 0\n \n # Value Iteration\n for it in range(self.n_iter):\n shuffle(self.states)\n for str_state in self.states:\n V[str_state] = max(list(map(lambda a: self.Bellman_a(a,V,self.gamma,str_state,rewards),range(self.cardA))))\n \n # Compute policy using value function\n for str_state in self.states:\n tmp = np.zeros(self.cardA)\n for a in range(self.cardA):\n r = rewards[str_state][a]+self.conf.loc[str_state,a] # UCB\n next_state = self.transition(str_state,a)\n r+=self.gamma*V[next_state]\n tmp[a] = r\n self.policy[str_state] = np.argmax(tmp)", "def test_post_parameter_update(self):\n # do one optimization step\n opt = optim.SGD(params=self.instance.parameters(), lr=1.0)\n batch = self.factory.mapped_triples[: self.batch_size, :].to(self.instance.device)\n scores = self.instance.score_hrt(hrt_batch=batch, mode=self.mode)\n fake_loss = scores.mean()\n fake_loss.backward()\n opt.step()\n\n # call post_parameter_update\n self.instance.post_parameter_update()\n\n # check model constraints\n self._check_constraints()", "def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)", "def update_policy(self, new_policy):\n self.policy = new_policy", "def update_baseline(self, returns, observations):\n #######################################################\n ######### YOUR CODE HERE - 1-5 lines. ############\n\n self.sess.run(self.update_baseline_op, feed_dict={self.baseline_target_placeholder : returns,\n self.observation_placeholder : observations})\n\n # TODO\n #######################################################\n ######### END YOUR CODE. 
############", "def update_algo_parameter(self, parameter_name, new_parameter_value):\n if hasattr(self, parameter_name):\n setattr(self, parameter_name, new_parameter_value)\n if parameter_name == \"lr\":\n for param_group in self.pi_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.q_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.alpha_optimizer.param_groups:\n param_group['lr'] = new_parameter_value", "def update(self, state:np.ndarray, action:int, reward:float, next_state:np.ndarray, terminal:bool):\n # TODO implement this function\n if terminal:\n vWPlus = 0\n else:\n vWPlus = self.vf.get_value(next_state)\n\n rho = reward + self.gamma*vWPlus-self.vf.get_value((state))\n #print(rho)\n #print(self.vf.get_value_grad(state))\n self.policy.add_to_params(self.alpha*rho*self.policy.gradient_prob(state, action))\n self.vf.add_to_params(self.beta*rho*self.vf.get_value_grad(state)[1])\n return None", "def update_average_policies(self):\n\n br_reach_probs = np.ones(self._num_players)\n avg_reach_probs = np.ones(self._num_players)\n self._average_policy_tables = [{} for _ in range(self._num_players)]\n# self._average_policy_tables_mlp = [{} for _ in range(self._num_players)]\n \n self._info_sets_inputs0 = []\n self._info_sets_targets0 = []\n self._info_sets_inputs1 = []\n self._info_sets_targets1 = []\n \n self._recursively_update_average_policies(self._game.new_initial_state(),\n avg_reach_probs, br_reach_probs)\n for i in range(self._num_players):\n self._policies[i] = _callable_tabular_policy(\n self._average_policy_tables[i])", "def update_learning_coeff(self):\n pass", "def policy(self, input_policy):\n self._policy = input_policy", "def set_fittable_parameters(p, model, fpn):\n for i, param_name in enumerate(fpn):\n param = getattr(model, param_name)\n param.value = p[i]", "def parameters_updated(self):\n self.calculate_variables()\n termination = self.detect_termination()\n if termination is None:\n self.request_estimation()\n self.monitor_progress()\n else:\n self.callback.plp_terminated(termination)", "def add_baseline_op(self, scope = \"baseline\"):\n ######################################################\n ######### YOUR CODE HERE - 4-8 lines. ############\n\n self.baseline = build_mlp(self.observation_placeholder,\n 1,\n scope,\n self.config.n_layers,\n self.config.layer_size,\n self.config.activation)\n loss = tf.losses.mean_squared_error(self.baseline_target_placeholder, tf.squeeze(self.baseline))\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n self.update_baseline_op = optimizer.minimize(loss)\n\n #######################################################\n ######### END YOUR CODE. 
############", "def test_policy_gradient(self):\n model = VanillaPolicyGradient(self.hparams.env)\n self.trainer.fit(model)", "def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n # Start with a random (all 0) value function\n cnt = 0\n V = np.zeros(env.nS)\n VV = np.zeros(env.nS)\n \n while True:\n delta = 0\n if cnt <4:\n print(V.reshape(env.shape))\n cnt = cnt+1\n # For each state, perform a \"full backup\"\n for s in range(env.nS):\n v = 0\n # Look at the possible next actions\n #print(\"\")\n for a, action_prob in enumerate(policy[s]):\n # For each action, look at the possible next states...\n #print(\"111\")\n for prob, next_state, reward, done in env.P[s][a]:\n #print(prob)\n #print(done)\n # Calculate the expected value\n v += action_prob * prob * (reward + discount_factor * V[next_state])\n # How much our value function changed (across any states)\n delta = max(delta, np.abs(v - V[s]))\n VV[s] = v\n # Stop evaluating once our value function change is below a threshold\n V=VV\n if delta < theta:\n break\n return np.array(V)", "def update_parameters(self, model, step=None, start_step=None, end_step=None):\n if isinstance(model, AveragedModel):\n model = model.module\n for p_swa, p_model in zip(self.parameters(), model.parameters()):\n device = p_swa.device\n p_model_ = p_model.detach().to(device)\n if self.n_averaged == 0:\n p_swa.detach().copy_(p_model_)\n else:\n p_swa.detach().copy_(\n self.avg_fn(p_swa.detach(), p_model_, self.n_averaged.to(device))\n )\n self.n_averaged += 1\n\n if step is not None:\n if start_step is None:\n start_step = step\n if end_step is None:\n end_step = step\n\n if start_step is not None:\n if self.n_averaged == 1:\n self.start_step = start_step\n\n if end_step is not None:\n self.end_step = end_step" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that address breakpoints set from a shared library's SBAddress work correctly.
def test_address_breakpoints(self):
    self.build()
    self.address_breakpoints()
[ "async def test_addressable_light(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},\n unique_id=MAC_ADDRESS,\n )\n config_entry.add_to_hass(hass)\n bulb = _mocked_bulb()\n bulb.raw_state = bulb.raw_state._replace(model_num=0x33) # RGB only model\n bulb.color_modes = {FLUX_COLOR_MODE_ADDRESSABLE}\n bulb.color_mode = FLUX_COLOR_MODE_ADDRESSABLE\n with _patch_discovery(), _patch_wifibulb(device=bulb):\n await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.bulb_rgbcw_ddeeff\"\n\n state = hass.states.get(entity_id)\n assert state.state == STATE_ON\n attributes = state.attributes\n assert attributes[ATTR_COLOR_MODE] == \"onoff\"\n assert ATTR_EFFECT_LIST in attributes\n assert attributes[ATTR_SUPPORTED_COLOR_MODES] == [\"onoff\"]\n\n await hass.services.async_call(\n LIGHT_DOMAIN, \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n bulb.async_turn_off.assert_called_once()\n\n await async_mock_device_turn_off(hass, bulb)\n assert hass.states.get(entity_id).state == STATE_OFF\n\n await hass.services.async_call(\n LIGHT_DOMAIN, \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n bulb.async_turn_on.assert_called_once()\n bulb.async_turn_on.reset_mock()\n await async_mock_device_turn_on(hass, bulb)", "def test_get_address(self):\n self.assertEqual(self.sharezone.address,self.tool.get_address())", "def test_setbrk(self):\n self.cmd('setbrk main.c, 3')\n self.debugger_mock.set_breakpoint.assert_called_with('main.c', 3)", "async def test_light_mac_address_not_found(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE}\n )\n config_entry.add_to_hass(hass)\n bulb = _mocked_bulb()\n with _patch_discovery(no_device=True), _patch_wifibulb(device=bulb):\n await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.bulb_rgbcw_ddeeff\"\n entity_registry = er.async_get(hass)\n assert entity_registry.async_get(entity_id).unique_id == config_entry.entry_id\n state = hass.states.get(entity_id)\n assert state.state == STATE_ON", "def test_staking_validators_validator_addr_get(self):\n pass", "def defineMyIpAddress(address) :\n print(\"not yet implemented\")", "def test_ip_addresses_update(self):\n pass", "def set_breakpoint_address(self, break_addr):\n #break_addr = int(break_addr, 16)\n #if not self.start_addr <= break_addr <= self.end_addr:\n # raise AddressOutOfRange(\"Address is out of .text memory range!\")\n #else:\n bp_id = self._next_bp_id()\n self.instruction_breakpoints[break_addr] = bp_id", "def test_get_xrp__ripple_address_details(self):\n pass", "def test_ip_addresses_create(self):\n pass", "def test_config_with_auto_set_address(self):\n # First change the board address so it cannot be found at the\n # default address.\n new_addr = 0x70\n ThunderBorg.set_i2c_address(new_addr)\n # Now instantiate ThunderBorg.\n tb = ThunderBorg(logger_name=self._LOG_FILENAME,\n log_level=logging.DEBUG,\n auto_set_addr=True)", "def testScalar_MacAddress(self):\n self.scalarGetAndCheck(\"snimpyMacAddress\", \"11:12:13:14:15:16\")", "def test_find_address(session, manuhome_id, street, city, region, country):\n if model_utils.is_legacy():\n location: Db2Location = Db2Location.find_by_manuhome_id_active(manuhome_id)\n assert location\n loc_json = 
location.registration_json\n assert loc_json.get('address')\n assert loc_json['address'].get('city') == city\n assert loc_json['address'].get('street') == street\n assert loc_json['address'].get('region') == region\n assert loc_json['address'].get('country') == country", "def test_acsls_to_internal(self):\n self.assertEqual(\"3,3,-1,1,1\",\n acs2internal.acsls_addr_to_internal_addr( \\\n acs_address=\"1,10,1,4\"))", "def test_BridgeAddressBase_init(self):\n self.assertIsNone(self.bab._address)\n self.assertIsNone(self.bab._fingerprint)", "def test_ip_addresses_list(self):\n pass", "def test_centipede_address_build(self):\n os.environ['SANITIZER'] = 'address'\n os.environ['ENGINE'] = 'centipede'\n os.environ['ARCHITECTURE'] = 'x86_64'\n project_yaml = {\n 'language': 'c++',\n 'fuzzing_engines': ['centipede'],\n 'sanitizers': ['address']\n }\n self.assertTrue(build.should_build(project_yaml))", "def test_ip_addresses_read(self):\n pass", "def test_internal_to_acsls(self):\n self.assertEqual(\"1,10,1,4\",\n acs2internal.internal_addr_to_acsls_addr( \\\n internal_address=\"3,3,-1,1,1\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lock and open the Grailfile at the given path.
def _open_grailfile(path):
    # if the Grailfile is foobar/Grailfile, store a lock at foobar/.grail/LOCK
    dotdir_path = _get_dotgrail_dir(path)
    lock_path = dotdir_path / 'LOCK'
    # Don't sit there waiting for the Grailfile to be unlocked
    lock = fasteners.InterProcessLock(str(lock_path))
    with fasteners.try_lock(lock) as got:
        if not got:
            raise utils.GrailError("Grailfile is locked")
        # Open the manifest and read it entirely into memory
        lines = None
        with path.open('r') as f:
            lines = list(f.readlines())
        # Return the Grailfile object from the context manager
        grailfile = Grailfile(lines)
        yield grailfile
        # When the context manager is exiting, write out the contents of the manifest to disk.
        with path.open('w') as f:
            grailfile.write(f)
[ "def fileLocked(self, the_file, ctx=None):\n pass", "def _lock( self, fileref=None, filehash=None ):\n\t\treturn self.__setlock( True, fileref=fileref, filehash=filehash )", "def __enter__(self):\n self._rpc_lock()\n old_mask = os.umask(0o077)\n try:\n trial_count = 0\n while self._fid is None and trial_count < 2:\n if os.path.exists(self._LOCK_PATH):\n # Rename existing file if it is not secure\n is_secure_path(self._LOCK_PATH)\n self._fid = open(self._LOCK_PATH, 'a+')\n if not is_secure_file(self._LOCK_PATH, self._fid):\n # File is insecure and was renamed, try again\n self._fid.close()\n self._fid = None\n trial_count += 1\n finally:\n os.umask(old_mask)\n if self._fid == None:\n self._rpc_unlock()\n raise RuntimeError('Unable to open write lock securely after two tries')\n # Advisory lock protects against simultaneous multi-process\n # modifications to the file, although we expect only one geopmd\n # process using this class.\n fcntl.lockf(self._fid, fcntl.LOCK_EX)\n self._fid.seek(0)\n return self", "def lockfile(path, max_retries=10, retry_delay=1, shared=False, error=None):\n tries = 1\n max_tries = 1 + max_retries\n path = path + '.lock'\n\n lock = None\n while lock is None and tries <= max_tries:\n try:\n lock = LockFile(path, shared=shared)\n except LockError:\n tries += 1\n if tries <= max_tries:\n time.sleep(retry_delay)\n\n try:\n if error and lock is None:\n raise error\n yield lock\n finally:\n if lock is not None:\n lock.close()", "async def acquire_lock(self) -> None:\n lockfile = self._proj_dir / \"zcbe.lock\"\n while lockfile.exists():\n self._warner.warn(\"lock-exists\",\n f\"Lock file {lockfile} exists\")\n await asyncio.sleep(10)\n lockfile.touch()", "def openFileInPath(self, path):\n try:\n if os.path.exists(path):\n os.startfile(path)\n except:\n print(traceback.format_exc())", "def save_lock_file(self, filepath):\n from dante.parsers import Parser\n Parser.save_lock_file(requirements=self, filepath=filepath)", "def set_lockfile(lockfile_path: pathlib.Path):\n log.debug(\"Setting watcher lockfile\")\n\n lockfile_path.touch(exist_ok=False)\n\n info = {\n \"host\": socket.getfqdn(),\n \"user\": getpass.getuser(),\n \"date_started\": str(datetime.datetime.now())\n }\n lockfile_path.write_text(json.dumps(info, indent=4))", "def acquire_flock(fname):\n\n lockfile = None\n\n try:\n lockfile = open(fname, 'wb')\n fcntl.flock(lockfile.fileno(), fcntl.LOCK_EX)\n except:\n # Note: caller is assumed to treat None as error\n if lockfile is not None:\n lockfile.close()\n lockfile = None\n\n return lockfile", "def do_lock(filename):\n try:\n file = open(filename, \"w\")\n file.write(\"locked\\n\")\n file.close()\n print_with_timestamp(\"Locked via file: %s\" % filename)\n return True\n except IOError as err:\n bail_with_message(\"I/O error({0}): {1}\".format(err.errno, err.strerror))", "def __enter__(self):\n\n self.acquire_lock(self.filename)\n self.file = open(self.filename, self.mode)\n return self.file", "def test_lockfile(self):\n with lockfile(self.path) as lock:\n self.assertIsInstance(lock, LockFile)", "def _attempt_lock(lock_file):\n umask_original = os.umask(0)\n try:\n fp = os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)\n finally:\n os.umask(umask_original)\n\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n\n return True", "def check_file_lock(self, repo_id, path, user):\n return 0", "def wait_and_lock():\n # Waits forever to get a lock on the lockfile\n # If an unrelated error 
occures a exception is raised \n self._f = open(self._filename, 'w')\n while true:\n try:\n fcntl.flock(filename, fcntl.LOCK_EX | dcnt.LOCK_NM)\n return\n except IOError as e:\n if e.errno == errno.EAGAIN:\n # Do not raise error when waiting to aquire lock\n time.sleep(0.1)\n else\n # Raise on all unrelated errors\n raise", "def _generateLockFile(self):\n if not os.path.isfile(self.__path):\n Logger.info(\"MEG LOCKING: GENERATING LOCK FILE\")\n os.makedirs(os.path.dirname(self.__path), exist_ok=True)\n open(self.__path, 'w+').close()", "def LockFile(fd):\n try:\n fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as err:\n if err.errno == errno.EAGAIN:\n raise errors.LockError(\"File already locked\")\n raise", "def fancy_open(path, mode = \"r\", lock = False, delete = False, pass_missing = False):\n # On NFSv4, we can't lock read-only files\n lock = lock and (\"w\" in mode or \"a\" in mode)\n\n try:\n f = open(path, mode)\n except IOError as e:\n # If we're just reading and pass_missing is set, ignore file missing\n if not (e.errno == errno.ENOENT and mode == \"r\" and pass_missing):\n raise e\n else:\n yield []\n else:\n if lock:\n fcntl.flock(f, fcntl.LOCK_EX)\n try:\n yield f\n finally:\n if lock:\n fcntl.flock(f, fcntl.LOCK_UN)\n\n f.close()\n\n # Race condition here? Can we remove a file before we unlock it?\n if delete:\n os.remove(path)", "def lock_file(filename, mode='r+', blocking=False):\n # TODO(wickman) We should probably adopt the lockfile project here as has\n # a platform-independent file locking implementation.\n if not HAS_FCNTL:\n raise RuntimeError('Interpreter does not support fcntl!')\n\n try:\n fp = open(filename, mode)\n except IOError:\n return None\n\n try:\n fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB if not blocking else fcntl.LOCK_EX)\n except IOError as e:\n if e.errno in (errno.EACCES, errno.EAGAIN):\n fp.close()\n return False\n\n return fp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search up from the current directory for a Grailfile.
def find():
    try:
        grailfile_dir = next(filter(_grailfile_exists, _search_path()))
        with _open_grailfile(grailfile_dir / 'Grailfile') as grailfile:
            yield grailfile
    except StopIteration as exc:
        raise utils.GrailError("No Grailfile found") from exc
[ "def search_file(filename, search_path):\n for path in string.split(search_path, \":\"):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate):\n return os.path.abspath(candidate)\n return None", "def search_file(filename, search_path):\n file_found = 0\n paths = string.split(search_path, ':')\n for path in paths:\n if os.path.exists(os.path.join(path, filename)):\n file_found = 1\n break\n if file_found:\n return os.path.abspath(os.path.join(path, filename))\n else:\n return None", "def _search_for_file(self, folder_path, basename):\n for file_name in os.listdir(folder_path):\n path = os.path.join(folder_path, file_name)\n if os.path.isdir(path):\n path = self._search_for_file(path, basename)\n if path:\n return path\n elif file_name.startswith(basename):\n return path\n return None", "def searchFile(file, subdir=None, extraDir=None):\n tests = []\n if Settings.value('koo.custom_ui_dir'):\n tests += [Settings.value('koo.custom_ui_dir')]\n if extraDir:\n tests += [extraDir]\n if subdir:\n tests += [os.path.join(x, subdir) for x in sys.path]\n tests += [os.path.join(x, 'Koo', subdir) for x in sys.path]\n # The following line is needed for Koo to work properly\n # under windows. Mainly we say attach 'share/koo/subdir' to\n # sys.path, which by default has 'c:\\python25' (among others).\n # This will give 'c:\\python25\\share\\koo\\ui' for example, which is\n # where '.ui' files are stored under the Windows platform.\n tests += [os.path.join(x, 'share', 'Koo', subdir) for x in sys.path]\n tests += ['%s/share/Koo/%s' % (sys.prefix, subdir)]\n else:\n tests += [os.path.join(x, 'Koo') for x in sys.path]\n tests += sys.path\n\n for p in tests:\n x = os.path.join(p, file)\n if os.path.exists(x):\n return x\n # Previously we returned False but None is more appropiate\n # and even some functions (such as initializeTranslations using\n # gettext.translation() will depend on it).\n return None", "def scan_build_files(self, base_path):", "def find(file_name, path=None):\n\n if not path:\n path = os.getcwd()\n\n #Traverse through the path looking for the given file name\n for dirpath, dirnames, filenames in os.walk(path):\n if file_name in filenames:\n file_name = os.path.join(dirpath, file_name)\n file_details = get_file_details(file_name)\n print(file_name, file_details)", "def which(searchFile) :\n for searchPath in os.environ[\"PATH\"].split(os.pathsep):\n test=os.path.join(searchPath,searchFile)\n if os.path.isfile(test): return test\n\n return None", "def __find_file(cls, file_base_name: str) -> str:\n\n directory = os.path.dirname(file_base_name)\n file_base = os.path.basename(file_base_name)\n\n # Identify all files in the directory.\n files = [\n os.path.join(directory, entry)\n for entry in os.listdir(directory)\n if os.path.isfile(os.path.join(directory, entry))\n ]\n\n # Find all files which match the base file name pattern.\n potential_matches = [\n file\n for file in files\n if file_base == os.path.splitext(os.path.basename(file))[0]\n ]\n\n # Filter to only files which match allowed extension patterns\n potential_matches = [\n file\n for file in potential_matches\n if os.path.splitext(file)[1].lower() in ['.yml', '.yaml']\n ]\n\n # Oops - looks like we have more than one file that matches the pattern,\n if len(potential_matches) > 1:\n raise ConfizzoError(f\"More than one file with name {file_base} (absent extension) was found.\")\n\n # Yikes - we seem to have not identified the configuration.\n if len(potential_matches) == 0:\n raise ConfizzoError(f\"No 
configuration files for {file_base} were found.\")\n\n return potential_matches[0]", "def _find_and_process_file(self, target_filename, handler):\n for dirname, _, files in os.walk(self._args.assetdir):\n for filename in files:\n if filename == target_filename:\n handler(os.path.join(dirname, filename))\n return\n print('{} not found'.format(target_filename))", "def findFileFromRoot(ifile):\n\n if os.path.isfile(ifile):\n return ifile\n\n ofile = None\n file = ifile\n while file != \"\":\n dirname, basename = os.path.split(file)\n if ofile:\n ofile = os.path.join(basename, ofile)\n else:\n ofile = basename\n\n if os.path.isfile(ofile):\n return ofile\n\n file = dirname\n\n raise IOError(\"Can't find %s\" % ifile)", "def _find_src_file(self, file_name):\n\n # look for absolute path first\n if os.path.sep == file_name[0]:\n if os.path.isfile(file_name):\n return os.path.normpath(file_name)\n\n # look in source dirs\n for src_dir in self._src_dirs:\n file_path = os.path.join(src_dir, file_name)\n if os.path.isfile(file_path):\n return os.path.normpath(file_path)\n\n raise FileNotFoundError(f\"Can't find {file_name} in one of the source dirs.\")", "def searchForFile(basename: 'SbString', directories: 'SbStringList', subdirectories: 'SbStringList') -> \"SbString\":\n return _coin.SoInput_searchForFile(basename, directories, subdirectories)", "def locate_file(pattern):\n\n path = None\n for fname in glob(pattern):\n if os.path.isfile(fname):\n path = fname\n\n return path", "def SoInput_searchForFile(basename: 'SbString', directories: 'SbStringList', subdirectories: 'SbStringList') -> \"SbString\":\n return _coin.SoInput_searchForFile(basename, directories, subdirectories)", "def get_file(self, name):\n\n for source_dir in self._sysroot.source_dirs:\n self.verbose(\"looking for '{0}' in {1}\".format(name, source_dir))\n\n pathname = os.path.join(source_dir, name)\n if os.path.isfile(pathname):\n self.verbose(\"found '{0}' in {1}\".format(name, source_dir))\n\n return pathname\n\n return None", "def _searchMainSourceFile(self, cfe_src_dir_path):\n\n # Search main function in src directory\n cfe_src_files = os.listdir(cfe_src_dir_path)\n main_src_file = None\n for cfe_src_file in cfe_src_files:\n cfe_src_file_path = cfe_src_dir_path + \"/\" + cfe_src_file\n if os.path.isfile(cfe_src_file_path):\n # open and read line\n is_search = False\n for line in open(cfe_src_file_path, \"r\"):\n # check \"int main(\"\n result_list = re.findall(r\"int\\s+main\\(\", line)\n # if exist, break this loop\n if len(result_list) > 0:\n is_search = True\n break\n if is_search is True:\n main_src_file = cfe_src_file\n break\n\n if main_src_file is None:\n raise Exception(\"Error: Could not find file with main function\")\n\n return main_src_file", "def scan_directory(full_path):\n\n included_files = []\n for containing_dir, _, possible_files in os.walk(full_path):\n for filename in possible_files:\n _, ext = os.path.splitext(filename)\n if ext.lower() in INCLUDED_EXTENSIONS or filename in INCLUDED_FILES:\n included_files.append(os.path.join(containing_dir, filename))\n\n included_files.append((\"README.md\", open(os.path.join(STARTER_KIT_DIR, \"README.md\")).read()))\n included_files.append((\".gitignore\", open(os.path.join(STARTER_KIT_DIR, \".gitignore\")).read()))\n included_files.append((\"./docs/api-docs.md\", open(\"./learn-programming-challenge/api-docs.md\").read()))\n included_files.append((\"./docs/game-overview.md\", open(\"./learn-programming-challenge/game-overview.md\").read()))\n return included_files", 
"def find_logback_xml():\n paths = [\n os.curdir,\n os.path.dirname(__file__),\n os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n \"java\",\n \"src\",\n \"main\",\n \"resources\",\n ),\n ]\n for path in paths:\n target = os.path.join(path, \"logback.xml\")\n if os.path.isfile(target):\n return target", "def find_file(input_dir, file_type):\n try:\n file_list = os.listdir(input_dir)\n except Exception:\n raise AnalyzerError(\"cannot read directory: \" + input_dir)\n\n for file in file_list:\n if file.endswith(file_type):\n return os.path.join(input_dir, file)\n\n raise AnalyzerError(f\"could not find any {file_type} file in {input_dir}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether or not a Grailfile exists in the given directory.
def _grailfile_exists(path):
    grailfile = path / 'Grailfile'
    return grailfile.exists() and not grailfile.is_dir()
[ "def check_files_in_directory(self, path):\n if os.path.exists(path):\n return os.path.isfile(path)", "def file_exists(file_path):\n if not os.path.isfile(file_path):\n print(\"Could not find file under:\", file_path)\n return False\n return True", "def file_exists(file_path):\n return True if os.path.isfile(file_path) else False", "def file_exists(self) -> bool:\n return self._path.exists()", "def _file_exists(self, name):\n return self.dir.file_exists(name)", "def groc_dir_exists(self):\n return os.path.exists(self.groc_dir)", "def file_exists(self, path):\n path = path.strip('/')\n file_collection = self._get_fs_instance().list()\n if path == '':\n return False\n if path in file_collection:\n return True\n return False", "def dir_exists(self, path=''):\n if path == '':\n return True\n else:\n return False", "def file_exist(file):\n\n if path.isfile(file):\n return True\n else:\n return False", "def file_valid(self,\n file_path: Path):\n if file_path.exists():\n return True\n else:\n logger.critical('File directory or file name is incorrect. Aborted')\n quit()", "def directory_exists(path):\n return os.path.isdir(path) and os.path.exists(path)", "def check_if_file_exists(file_path: Path):\n if not file_path.exists() and file_path.is_file():\n raise NexusCascError(message=f\"Yaml file not found in {file_path}\")", "def directory_exists(directory_path):\n return True if os.path.isdir(directory_path) else False", "def pathIsValidAndExists(path):\n\treturn path is not None and os.path.exists(path)", "def validate_config_path(config_path):\n if os.path.exists(config_path):\n return True", "def exists(simulation):\n app.logger.debug('Checking if file '\n + _SIM_PATH + simulation + '.scala exits')\n return os.path.exists(_SIM_PATH + simulation + '.scala')", "def check_configuration(file):\n return os.path.isfile(file)", "def check_save_file_exists():\n saveFilePath = os.path.join(ASSETS_LOCATION, SAVEFILE_NAME)\n return os.path.exists(saveFilePath)", "def file_exists(self, resource: GenomicResource, filename: str) -> bool:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the optimal weights for the neural network with a step activation function. This function will not be graded if there are no optimal weights. See the PDF for instructions on what each weight represents. The hidden layer weights are notated by [1] on the problem set and the output layer weights are notated by [2]. This function should return a dict with elements for each weight; see example_weights above.
def optimal_step_weights():
    w = example_weights()

    # *** START CODE HERE ***
    w["hidden_layer_0_1"] = 0.5
    w["hidden_layer_1_1"] = 0
    w["hidden_layer_2_1"] = -1
    w["hidden_layer_0_2"] = 0.5
    w["hidden_layer_1_2"] = -1
    w["hidden_layer_2_2"] = 0
    w["hidden_layer_0_3"] = -4
    w["hidden_layer_1_3"] = 1
    w["hidden_layer_2_3"] = 1
    w["output_layer_0"] = -0.5
    w["output_layer_1"] = 1
    w["output_layer_2"] = 1
    w["output_layer_3"] = 1
    # *** END CODE HERE ***

    return w
[ "def getWeightDict():\n \n weightDict = {}\n ## A list with weights in the same order as the suit factors above\n weightDict[8] = [0.29,0.22,0.21,0.28] \n weightDict[6] = [0.22,0.14,0.11,0.23,0.30]\n weightDict[5] = [0.29,0.34,0.37]\n weightDict[9] = [0.53,0.45,0.02]\n weightDict[4] = [0.46,0.35,0.19]\n return weightDict", "def build_weights(self) -> Dict[object, float]:\n self.build()\n\n self._weights = [np.random.rand() for x in range(0, self.n)]\n return dict(zip(self.build_property, self.weights))", "def get_weights_dict(comp_shape, in_shape, out_shape, std_mult=0.4, name='W',\n\t\t\t\t\t device='/cpu:0'):\n\tweights_dict = {}\n\tfor i, cs in enumerate(comp_shape):\n\t\tshape = cs + [in_shape,out_shape]\n\t\tweights_dict[i] = get_weights(shape, std_mult=std_mult,\n\t\t\t\t\t\t\t\t\t name=name+'_'+str(i), device=device)\n\treturn weights_dict", "def get_trainable_weight(op_weights):\n return op_weights[\"trainable\"]", "def get_all_weights(self):\n\n # add weights for each layer if layer is a Dense layer and return the list\n return [l.weights for l in self.layers if isinstance(l, Dense)]", "def _get_weights(\n data, edge_weights_fn=\"mutual_info\", n_jobs=-1, show_progress=True\n ):\n # Step 0: Check for edge weight computation method\n if edge_weights_fn == \"mutual_info\":\n edge_weights_fn = mutual_info_score\n elif edge_weights_fn == \"adjusted_mutual_info\":\n edge_weights_fn = adjusted_mutual_info_score\n elif edge_weights_fn == \"normalized_mutual_info\":\n edge_weights_fn = normalized_mutual_info_score\n elif not callable(edge_weights_fn):\n raise ValueError(\n f\"edge_weights_fn should either be 'mutual_info', 'adjusted_mutual_info', \"\n f\"'normalized_mutual_info'or a function of form fun(array, array). Got: f{edge_weights_fn}\"\n )\n\n # Step 1: Compute edge weights for a fully connected graph.\n n_vars = len(data.columns)\n pbar = combinations(data.columns, 2)\n if show_progress and SHOW_PROGRESS:\n pbar = tqdm(pbar, total=(n_vars * (n_vars - 1) / 2), desc=\"Building tree\")\n\n vals = Parallel(n_jobs=n_jobs, prefer=\"threads\")(\n delayed(edge_weights_fn)(data.loc[:, u], data.loc[:, v]) for u, v in pbar\n )\n weights = np.zeros((n_vars, n_vars))\n indices = np.triu_indices(n_vars, k=1)\n weights[indices] = vals\n weights.T[indices] = vals\n\n return weights", "def get_weights(self):\n\n weights = []\n for layer in self.NN:\n for node in layer:\n for weight in node.weights:\n weights.append(weight)\n return weights", "def prepare_tensors(self):\n self.weight_dict = { # Weights lower/activity upper\n 'P': {\n 'r': {\n 'weight': 'p_r',\n 'activity': 'P_r',\n 'tuning': 'p_t',\n # 'bias': 'i_b'\n }\n },\n 'I': {\n 'r': { # Recurrent state\n 'weight': 'i_r',\n 'bias': 'i_b',\n 'activity': 'I_r'\n },\n # 'f': { # Recurrent state\n # 'weight': 'i_f',\n # 'activity': 'I_f'\n # },\n },\n 'O': {\n 'r': { # Recurrent state\n 'weight': 'o_r',\n 'bias': 'o_b',\n 'activity': 'O_r'\n },\n # 'f': { # Recurrent state\n # 'weight': 'o_f',\n # 'activity': 'O_f'\n # },\n },\n 'xi': {\n 'r': { # Recurrent state\n 'weight': 'xi',\n }\n },\n # 'alpha': {\n # 'r': { # Recurrent state\n # 'weight': 'alpha',\n # }\n # },\n 'beta': {\n 'r': { # Recurrent state\n 'weight': 'beta',\n }\n },\n # 'mu': {\n # 'r': { # Recurrent state\n # 'weight': 'mu',\n # }\n # },\n 'nu': {\n 'r': { # Recurrent state\n 'weight': 'nu',\n }\n },\n 'zeta': {\n 'r': { # Recurrent state\n 'weight': 'zeta',\n }\n },\n 'gamma': {\n 'r': { # Recurrent state\n 'weight': 'gamma',\n }\n },\n 'phi': {\n 'r': { # Recurrent 
state\n 'weight': 'phi',\n }\n },\n 'kappa': {\n 'r': { # Recurrent state\n 'weight': 'kappa',\n }\n },\n 'rho': {\n 'r': { # Recurrent state\n 'weight': 'rho',\n }\n },\n }\n\n # weakly tuned summation: pooling in h, w dimensions\n #############################################\n with tf.variable_scope('contextual_circuit'):\n if isinstance(self.p_shape[0], list) and 'P' not in self.lesions:\n # VGG-style filters\n for pidx, pext in enumerate(self.p_shape):\n if pidx == 0:\n it_key = self.weight_dict['P']['r']['weight']\n else:\n self.weight_dict[\n 'P']['r']['weight_%s' % pidx] = 'p_r_%s' % pidx\n it_key = self.weight_dict['P']['r']['weight_%s' % pidx]\n setattr(\n self,\n it_key,\n tf.get_variable(\n name=it_key,\n dtype=self.dtype,\n initializer=initialization.xavier_initializer(\n shape=pext,\n uniform=self.normal_initializer),\n trainable=True))\n else:\n p_array = np.ones(self.p_shape)\n p_array[\n self.SSN // 2 - py_utils.ifloor(\n self.SRF / 2.0):self.SSF // 2 + py_utils.iceil(\n self.SSN / 2.0),\n self.SSN // 2 - py_utils.ifloor(\n self.SRF / 2.0):self.SSF // 2 + py_utils.iceil(\n self.SSN / 2.0),\n :, # exclude CRF!\n :] = 0.0\n p_array = p_array / p_array.sum()\n if 'P' in self.lesions:\n print 'Lesioning near eCRF.'\n p_array = np.zeros_like(p_array).astype(np.float32)\n\n # Association field is fully learnable\n if self.association_field and 'P' not in self.lesions:\n setattr(\n self,\n self.weight_dict['P']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['P']['r']['weight'],\n dtype=self.dtype,\n # shape=self.p_shape,\n initializer=initialization.xavier_initializer(\n shape=self.p_shape,\n uniform=self.normal_initializer),\n trainable=True))\n else:\n setattr(\n self,\n self.weight_dict['P']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['P']['r']['weight'],\n dtype=self.dtype,\n initializer=p_array.astype(np.float32),\n trainable=False))\n\n # Gate weights\n setattr(\n self,\n self.weight_dict['I']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['I']['r']['weight'],\n dtype=self.dtype,\n trainable=True,\n initializer=initialization.xavier_initializer(\n shape=self.i_shape,\n uniform=self.normal_initializer,\n mask=None)))\n # setattr(\n # self,\n # self.weight_dict['I']['f']['weight'],\n # tf.get_variable(\n # name=self.weight_dict['I']['f']['weight'],\n # dtype=self.dtype,\n # trainable=True,\n # initializer=initialization.xavier_initializer(\n # shape=self.i_shape,\n # uniform=self.normal_initializer,\n # mask=None)))\n if self.gate_bias_init == 'chronos':\n bias_init = -tf.log(\n tf.random_uniform(\n self.bias_shape, minval=1, maxval=self.timesteps - 1))\n else:\n bias_init = tf.ones(self.bias_shape)\n setattr(\n self,\n self.weight_dict['I']['r']['bias'],\n tf.get_variable(\n name=self.weight_dict['I']['r']['bias'],\n dtype=self.dtype,\n trainable=True,\n initializer=bias_init))\n\n # Output\n setattr(\n self,\n self.weight_dict['O']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['O']['r']['weight'],\n dtype=self.dtype,\n trainable=True,\n initializer=initialization.xavier_initializer(\n shape=self.o_shape,\n uniform=self.normal_initializer,\n mask=None)))\n # setattr(\n # self,\n # self.weight_dict['O']['f']['weight'],\n # tf.get_variable(\n # name=self.weight_dict['O']['f']['weight'],\n # dtype=self.dtype,\n # trainable=True,\n # initializer=initialization.xavier_initializer(\n # shape=self.o_shape,\n # uniform=self.normal_initializer,\n # mask=None)))\n if self.gate_bias_init == 'chronos':\n # bias_init = -tf.log(\n # 
tf.random_uniform(\n # self.bias_shape, minval=1, maxval=self.timesteps - 1))\n bias_init = -bias_init\n else:\n bias_init = tf.ones(self.bias_shape)\n setattr( # TODO: smart initialization of these\n self,\n self.weight_dict['O']['r']['bias'],\n tf.get_variable(\n name=self.weight_dict['O']['r']['bias'],\n dtype=self.dtype,\n trainable=True,\n initializer=bias_init))\n\n # Degree of freedom weights (vectors)\n w_shape = [1, 1, 1, self.k]\n b_shape = [1, 1, 1, self.k]\n # w_array = np.ones(w_shape).astype(np.float32)\n # b_array = np.zeros(b_shape).astype(np.float32)\n\n # Divisive params\n if self.beta and not self.lesion_beta:\n self.beta = tf.get_variable(\n name='beta',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.ones(w_shape, dtype=tf.float32))\n elif self.lesion_beta:\n self.beta = tf.constant(0.)\n else:\n self.beta = tf.constant(1.)\n\n if self.nu and not self.lesion_nu:\n self.nu = tf.get_variable(\n name='nu',\n initializer=initialization.xavier_initializer(\n shape=b_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(b_shape, dtype=tf.float32))\n elif self.lesion_nu:\n self.nu = tf.constant(0.)\n else:\n self.nu = tf.constant(1.)\n if self.zeta:\n self.zeta = tf.get_variable(\n name='zeta',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.zeta = tf.constant(1.)\n if self.gamma:\n self.gamma = tf.get_variable(\n name='gamma',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.gamma = tf.constant(1.)\n # # TODO\n # self.ebias = tf.get_variable(\n # name='ebias',\n # initializer=initialization.xavier_initializer(\n # shape=b_shape,\n # uniform=self.normal_initializer,\n # mask=None))\n\n if self.xi:\n self.xi = tf.get_variable(\n name='xi',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.xi = tf.constant(1.)\n if self.multiplicative_excitation:\n if self.lesion_kappa:\n self.kappa = tf.constant(0.)\n else:\n self.kappa = tf.get_variable(\n name='kappa',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(w_shape, dtype=tf.float32) + 0.5)\n\n if self.lesion_omega:\n self.omega = tf.constant(0.)\n else:\n self.omega = tf.get_variable(\n name='omega',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(w_shape, dtype=tf.float32) + 0.5)\n else:\n self.kappa = tf.constant(1.)\n self.omega = tf.constant(1.)\n if self.adapation:\n self.rho = tf.get_variable(\n name='rho',\n initializer=tf.ones(self.timesteps, dtype=tf.float32))\n if self.lesion_omega:\n self.omega = tf.constant(0.)\n if self.lesion_kappa:\n self.kappa = tf.constant(0.)\n self.lateral_bias = tf.get_variable(\n name='lateral_bias',\n initializer=initialization.xavier_initializer(\n shape=b_shape,\n uniform=self.normal_initializer,\n mask=None))", "def initialize_weights(training_points):\n N = len(training_points)\n weight_dict = dict()\n for i in training_points:\n weight_dict[i] = make_fraction(1,N)\n return weight_dict", "def construct_fc_weights(self):\n dtype = tf.float32\n fc_weights = {}\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)\n filter_num = 
FLAGS.filter_num\n\n if FLAGS.phase=='pre':\n fc_weights['w5'] = tf.get_variable('fc_w5', [filter_num, FLAGS.pretrain_class_num], initializer=fc_initializer)\n fc_weights['b5'] = tf.Variable(tf.zeros([FLAGS.pretrain_class_num]), name='fc_b5')\n else:\n filter_dim = FLAGS.img_size // 16\n # assumes max pooling\n fc_weights['w5'] = tf.get_variable('w5', [filter_num * filter_dim * filter_dim, self.dim_output], initializer=fc_initializer)\n fc_weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')\n return fc_weights", "def showWeights(self):\n print 'W1: ' + str(self.params[0].get_value().shape)\n print self.params[0].get_value()\n print 'b1: ' + str(self.params[1].get_value().shape)\n print self.params[1].get_value()\n print 'W2: ' + str(self.params[2].get_value().shape)\n print self.params[2].get_value()\n print 'b2: ' + str(self.params[3].get_value().shape)\n print self.params[3].get_value()", "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "def get_loss_weights(arguments):\n\n default_returnvalue = {key: value for key, value in arguments.__dict__.items() if\n ((\"weight\" in key) and (not value == -1))}\n if (not arguments.loss_gen == TOTAL_G_LOSS):\n for key in default_returnvalue:\n if (not arguments.loss_gen in key):\n default_returnvalue[key] = 0.0\n\n print(default_returnvalue)\n return default_returnvalue", "def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)", "def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]", "def get_weights_from_layer(self, i: int) -> np.ndarray:\n return self.__weights[i]", "def get_weights_A(self):\r\n # Get the weights from task A (in a numpy array, so that they are static)\r\n self.weightsA = []\r\n # Convert trainable weights to tensors\r\n for w in self.net.trainable_variables:\r\n self.weightsA.append(tf.convert_to_tensor(w.numpy()))", "def get_recurrent_weights(self):\n return npify(self.rnn_layer.weight_hh_l0)", "def get_weight_variables(self):\n var_names = [pv['name'] for pv in self.data['reference']]\n var_weight = {var: weight for var, weight in zip(var_names, self.data['feat_weights'])}\n return var_weight" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trivial helper to collect and return all non-mesons.
def _get_non_mesons(PDGIDs):
    return [pid for pid in PDGIDs if pid not in _get_mesons(PDGIDs)]
[ "def obtener_notas(self):\n return list(self.notas)", "def findnonc(self):\n # some aliases\n assert (self.stimuli!=None and self.signals!=None)\n\n dist = nx.algorithms.floyd_warshall(self)\n\n namesNONC = []\n for node in self.nodes():\n # search for paths from the species to the signals\n spe2sig = [(node, dist[node][s]) for s in self.signals if dist[node][s]!=np.inf]\n # and from the nstimuli to the species\n sti2spe = [(node, dist[s][node]) for s in self.stimuli if dist[s][node]!=np.inf]\n\n if len(spe2sig)==0 or len(sti2spe)==0:\n if node not in self.signals and node not in self.stimuli:\n namesNONC.append(node)\n\n namesNONC = list(set(namesNONC)) # required ?\n return namesNONC", "def get_not_always_used(self):\n results_list = []\n\n # initial list is made of fixtures that are in the children\n initial_list = self.gather_all_required(include_parents=False)\n\n for c in self.get_leaves():\n j = 0\n for i in range(len(initial_list)):\n fixture_name = initial_list[j]\n if fixture_name not in c.gather_all_required():\n del initial_list[j]\n results_list.append(fixture_name)\n else:\n j += 1\n\n return results_list", "def filter_none(x: Iterable[Optional[A]]) -> Iterable[A]:\n return filter(not_none, x) # type: ignore", "def get_excluded_items():", "def from_nulity_matroid(matroid: tuple[set[T], Callable[[set[T]], int]]) -> list[set[T]]:\n E, _ = matroid\n return from_flats_matroid((E, flats.from_nulity_matroid(matroid)))", "def filter_invalid(l):\n return ([x for x in l if x is not None])", "def no_successors_iter(self):\n for n in self.nodes:\n if not len(list(self.successors(n))):\n yield n", "def _filter_species(parsed):\n coreactants, catalysts, other_species, _ = parsed\n combined = [d['Species'] for d in coreactants] + [d['Species'] for d in catalysts]\n # if not coreactants or catalysts found, return unchanged\n if not combined:\n return other_species\n\n else:\n unaccounted = []\n combined = ' '.join(combined)\n for species in other_species:\n found = re.search(re.escape(species), combined) # include individual tokens for multi-token names\n if not found and species != 'M':\n unaccounted.append(species)\n return list(set(unaccounted))", "def _filter(self, tokens):\n\t\tz = filter(lambda w: len(w) > 1 and w not in self.stopwords, tokens)\n\t\treturn [strip_special(w) for w in z]", "def get_not_matching_list(self, data: List[str]) -> List[str]:\n\n pre_result = re.compile(self.regex)\n\n return [x for x in data if not pre_result.search(str(x))]", "def get_discard_possibilities(self):\n result = []\n self.get_discard_possibilities_rec(self.hand, [], self.number_point, result)\n\n return result", "def cal_problematic_rxns(self):\n\n problematic_rxns = []\n for met in self.metabolites:\n if met.is_exclude:\n problematic_rxns.append(met.reactions)\n\n if len(problematic_rxns) > 0:\n problematic_rxns = frozenset.union(*problematic_rxns)\n problems = [i.id for i in problematic_rxns]\n return problems\n else:\n return []", "def get_unnecessary_elements(tag, clear_elem):\n tag_list = list(filter(lambda e: 'none' not in e, tag))\n\n garbage_full = list()\n\n for each_tag in tag_list:\n split_tag = each_tag.split('\"')\n try:\n clear_tag = split_tag[1]\n if clear_tag in clear_elem or 'inline' in clear_tag or re.search(r'^\\d+$', clear_tag):\n pass\n else:\n garbage_full.append(each_tag)\n except IndexError:\n garbage_full.append(each_tag)\n return garbage_full", "def unbeaten_candidates(mg):\n return [n for n in mg.nodes if mg.in_degree(n) == 0]", "def _filter_internal(iterable, 
return_internal):\n if return_internal:\n return iterable\n return [el for el in iterable if not el.internal]", "def _get_elements_to_discard(self):\n return self._get_elements(self.xpaths_to_discard)", "def gather_all_discarded(self):\n discarded = list(self.split_fixture_discarded_names)\n if self.parent is not None:\n discarded = discarded + self.parent.gather_all_discarded()\n\n return discarded", "def reactions_with_no_proteins(reactions, verbose=False):\n\n nopegs = set()\n for r in reactions:\n if reactions[r].number_of_enzymes() == 0:\n nopegs.add(r)\n\n if verbose:\n sys.stderr.write(\"REACTIONS WITH NO PROTEINS: {} reactions have no pegs associated \".format(len(nopegs)) +\n \"with them (out of {} reactions)\\n\".format(len(reactions)))\n\n return nopegs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obviously all pentaquarks are baryons!
def test_pentaquarks_are_baryons(PDGIDs): _pentaquarks = (PDGIDs.UCbarCUDPentaquark, PDGIDs.AntiUCbarCUDPentaquark) for pid in _pentaquarks: assert is_baryon(pid)
[ "def test_breed(self):\n\t\tpass", "def mood():", "def dummy_no_ephem():", "def test_aromaticity_perception_benzene(self):\n mol = Molecule(smiles='c1ccccc1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 1)\n self.assertEqual(len(aromatic_bonds), 1)\n for bond in aromatic_bonds[0]:\n self.assertTrue(bond.atom1 in aromatic_atoms[0] and bond.atom2 in aromatic_atoms[0])", "def test_pro_bowlers(self):\n pass", "def bark(self):\n return \"bark bark bark!\"", "def non_acsandhi(self):\n # PMS: 8.2.39. jhalAM jaSo \"nte\n self.jhalamjasonte()\n # PMS: 8.3.7. naS chavy apraSAn\n self.naschavyaprasan()\n # PMS: 8.3.14. ro ri \n self.rori()\n # PMS: 6.3.111. qhralope pUrvasya dIrgho \"RaH\n self.dhralope()\n # PMS: 8.3.17. bhobhagoaghoapUrvasya yo \"Si\n self.bhobhago() \n # PMS: 8.3.15. kharavasAnayor visarjanIyaH\n self.kharavasanayor()\n # PMS: 8.3.20. oto gArgyasya. This cannot precede vowel sandhi\n self.otogargyasya()\n # PMS: 8.3.23. mo \"nusvAraH\n self.monusvarah()\n if not self.NoKNam:\n # ejf: This condition is ALWAYS TRUE. See comment in namohrasvad.\n # PMS: 8.3.32. Namo hrasvAd aci NamuR nityam\n self.namohrasvad()\n # PMS: 8.3.34. visarjanIyasya saH\n self.visarjaniyasyasah()\n if self.ScharSchari:\n # PMS: 8.3.36. vA Sari\n self.vasari()\n # PMS: 8.3.41, 8.3.45 && 8.3.46 are apavAdas of 8.3.37\n # so they must precede it\n # PMS: 8.3.41. idudupadhasya cApratyayasya (kupvoH 37, iRaH zaH 39)\n self.idudupadhasya()\n # PMS: 8.3 .45. nitya samAse \"nutarapadasthasya\n self.nityamsamase()\n # PMS: 8.3.46. ataH kfkamikaMsakumbhapAtrakuSAkarRIzvanavyayasya\n self.atahkrkamikamsa()\n if self.XkXpKupvoh:\n # PMS: 8.3.37. kupvoH XkXpau ca (khari 15).\n self.kupvohXkXpau()\n # PMS: 8.3.28. NRoH kukwuk Sari\n self.nnohkuktuksari()\n # PMS: 8.3.31. Si tuk\n self.situk()\n if not self.NoStoh:\n # PMS: 8.4.40. stoH ScunA ScuH\n self.stohscunascuh()\n # PMS: 8.4.41. zwunA zwuH\n self.stunastuh()\n # PMS: 8.4.60. tor li\n self.torli()\n if self.ChAti:\n # PMS: 8.4.63. SaS cho \"wi. (jhayaH 62, anyatarasyAm 62)\n self.saschoti()\n # PMS: 8.4.63 vt. 964 chatvamamIti vaktavyam\n self.chatvamami()\n # PMS: 8.4.62. jhayo ho \"nyatarasyAm\n self.jhayoho()\n # PMS: 8.4.65 Jaro Jari savarRe\n self.jharojharisavarne()\n # PMS: 8.4.55. khari ca\n self.kharica()\n if self.ParaSavarna or self.OtherCloseSandhi:\n # PMS: 8.4.59. vA padAntasya (anusvArasya yayi parasavarRaH 58).\n self.anusvarasya()", "def bird_head():\n print(part_hair_curly())\n print(part_eyes_basic())\n print(part_nose_down())\n print(part_mouth_mustache())\n print(part_chin_basic())", "def test_aromaticity_perception_biphenyl(self):\n mol = Molecule(smiles='c1ccc(cc1)c2ccccc2')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 2)\n self.assertEqual(len(aromatic_bonds), 2)\n for index in range(len(aromatic_atoms)):\n for bond in aromatic_bonds[index]:\n self.assertTrue(bond.atom1 in aromatic_atoms[index] and bond.atom2 in aromatic_atoms[index])", "def phero_evaporation(self):\n self.pheromone -= (self.pheromone * self.phero_evap)", "def test_aromatic_benzene(self):\n m = Molecule().from_smiles('C1=CC=CC=C1')\n isomers = m.generate_resonance_structures()\n self.assertTrue(any(isomer.is_aromatic() for isomer in isomers))", "def idudeddvivacanampragrhyam(self):\n self.Pragrhya = False\n # PMS: 1.1.11. 
IdUdeddvivacanam pragfhyam\n if self.External and self.Pada1 in pragrhya_set:\n self.Pragrhya = True", "def test_list_of_non_modulatory_phrases_is_empty_for_pieces_with_heavy_polymodal_frame():\n assert piece3.non_modulatory_phrases == []\n assert piece4.non_modulatory_phrases == []", "def ban_toxic_compounds(pl):\n pl.ban_compound(546) # Methylglyoxal", "def __get_bomb__(self,y,x,p):\n\t\t# p.inventory['bombs']['quantity'] += 3\n\t\tif p.addItem('bombs',3):\n\t\t\tself.maze[y][x]['contains']['bomb'] = False\n\t\t\tself.__addScore__(self.items['bomb']['score'],p)", "def test_HasOnlyOneArmature(self):\n self.assertTrue(len(Blender.Armature.Get())==1,\"This mesh has too many armatures.\")", "def yaronunasike(self):\n [self.Isthana2,self.Iyatna2] = identify(self.Linary[self.Index + 1])\n # PMS: if ( set_memberP(Linary[Index - 1],Yar) && (Index + 2 < linmax) then\n # PMS: if (Iyatna2 eq isparsa5) || (Linary[Index + 2] == sktanunasika ) {\n # PMS: we won't exercise the nasalized option for the semivowels \n # y, v && l; just for the stops\n if (set_memberP(self.Linary[self.Index - 1], Jhay)) and (self.Iyatna2 == isparsa5):\n [self.Isthana1,self.Iyatna1] = identify(self.Linary[self.Index - 1])\n self.Linary[self.Index - 1] = Soundary[self.Isthana1][isparsa5]", "def test_aromaticity_perception_azulene(self):\n mol = Molecule(smiles='c1cccc2cccc2c1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 0)\n self.assertEqual(len(aromatic_bonds), 0)", "def wants_plain_hotdog(ketchup, mustard, onion):\n pass\n return not (ketchup or mustard or onion)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the http server
def start(self): self.log('Server started...') self.httpd.serve_forever()
[ "async def start(self):\n self._app = web.Application(\n loop=self._loop, middlewares=self._middlewares\n )\n for resource in self._nyuki.HTTP_RESOURCES:\n resource.RESOURCE_CLASS.register(self._nyuki, self._app.router)\n log.info(\"Starting the http server on {}:{}\".format(self._host, self._port))\n self._handler = self._app.make_handler(access_log=access_log)\n self._server = await self._loop.create_server(\n self._handler, host=self._host, port=self._port\n )", "def run(self):\n parts = urlparse(HOST_BASE)\n domain, port = parts.netloc.split(\":\")\n self.srv = make_server(domain, int(port), self.app)\n try:\n self.srv.serve_forever()\n except:\n import traceback\n traceback.print_exc()\n # Failed to start\n self.srv = None", "def start(self):\n self.httpd = socketserver.ThreadingTCPServer(\n (\"\", self.port), self.handler, False\n )\n self.httpd.request_queue_size = 500\n self.httpd.timeout = 2000\n self.httpd.server_bind()\n self.httpd.server_activate()\n\n if self.cert_filename != \"\" and os.path.isfile(self.cert_filename) and \\\n self.key_filename != \"\" and os.path.isfile(self.key_filename):\n self.httpd.socket = ssl.wrap_socket(\n self.httpd.socket, certfile=self.cert_filename, server_side=True,\n keyfile=self.key_filename\n )\n print(\"start serving\")\n _thread.start_new_thread(self.httpd.serve_forever, ())", "def run(self, host='127.0.0.1', port=5000):\n httpd = wsgiref.simple_server.make_server('', port, self)\n log(\"PWF now running on http://%s:%s/\" % (host, port,))\n httpd.serve_forever()", "def run(self):\n self.server = HTTPServer(('', HttpServer.serverPort), HttpHandler)\n self.server.timeout = HttpServer.timeout\n self.server.msg_queue = self.msg_queue\n logger.warning(\"HTTP server running\")\n try:\n while True:\n self.server.handle_request()\n try:\n msg: HttpMessage = self.msg_queue.get(False)\n self.update_state(msg)\n logger.debug(\"HTTP message received\")\n except Empty:\n pass\n except Exception:\n print(sys.exc_info())\n\n logger.warning(\"HTTP server terminating\")", "def start_httpd(addr): # pragma: no cover\n host, port = addr.split(':')\n logging.info('Starting HTTPD on {}:{}'.format(host, port))\n prometheus_client.start_http_server(int(port), host)", "def start():\n port = cfg.web.port\n\n events.dispatcher.register_target(event_logger)\n\n logging.info('Starting web server: port=%d' % port)\n utils.DaemonThread(target=bottle.run,\n kwargs={'host': cfg.web.bind,\n 'port': cfg.web.port}).start()", "def main() -> None:\n\n start_server()", "def start_http_server(port, addr=''):\n class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):\n pass\n class PrometheusMetricsServer(threading.Thread):\n def run(self):\n httpd = ThreadingSimpleServer((addr, port), MetricsHandler)\n httpd.serve_forever()\n t = PrometheusMetricsServer()\n t.daemon = True\n t.start()", "def start(self):\n log.enable_pretty_logging()\n application = WebApplication(self.handlers_initializer, None, debug=self.get('debug'))\n application.listen(self.port)\n # _logger.info('Gandalf %sAPI running on port %s', self.env + ' ' if self.env else '', self.port)\n ioloop.IOLoop.current().start()", "def start(self):\n self._thread = threading.Thread(target=self._serve)\n self._thread.start()", "def Start(self):\n if self._server:\n raise ValueError('Server already started')\n\n\n self._server = server.BuildAndStart(self._router, self._port,\n self._thread_pool_size)", "def runserver():\n from web.server import runserver\n runserver()", "def serve(self, port=8000):\n \n # Make a HTTP-server 
from the WSGI-handler\n server = make_server('', port, self.wsgi)\n \n # Run the server until terminated\n server.serve_forever()", "def start(self):\n if not self.is_run:\n # set class attribute\n ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET\n ThreadingTCPServer.daemon_threads = True\n # init server\n self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService, bind_and_activate=False)\n # set socket options\n self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n # TODO test no_delay with bench\n self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n # bind and activate\n self._service.server_bind()\n self._service.server_activate()\n # serve request\n if self.no_block:\n self._serve_th = Thread(target=self._serve)\n self._serve_th.daemon = True\n self._serve_th.start()\n else:\n self._serve()", "def start_http_server(ip, iterations_check=10):\n cmd = \"\\'python -m SimpleHTTPServer 80\"\n cmd = cmd + \" > /dev/null 2>&1 &\\'\"\n run_cmd_remote(ip, cmd)\n\n # Wait for the process to start before checking\n time.sleep(3)\n _, output, _ = run_cmd_remote(ip, \"ps aux | grep SimpleHTTPServer\")\n if not output:\n logger.error(\"Failed to start http server\")\n return False\n logger.info(output)\n\n while iterations_check > 0:\n _, output, _ = run_cmd_remote(ip, \"netstat -pntl | grep :80\")\n if output:\n return True\n else:\n logger.debug(\"Port 80 is not up yet\")\n iterations_check -= 1\n time.sleep(5)\n\n logger.error(\"Failed to start http server\")\n return False", "def listen(self):\n msgtmpl = (u'Serving on host %(bind)s:%(port)s')\n LOG.info(msgtmpl,\n {'bind': self._wsgi_conf.bind, 'port': self._wsgi_conf.port})\n\n httpd = simple_server.make_server(self._wsgi_conf.bind,\n self._wsgi_conf.port,\n self.app)\n httpd.serve_forever()", "def StartHttpServer(local_dir_path, host_port=0):\n assert local_dir_path\n httpd = _SilentTCPServer(('127.0.0.1', host_port),\n _GetHandlerClassForPath(local_dir_path))\n atexit.register(httpd.shutdown)\n\n http_thread = threading.Thread(target=httpd.serve_forever)\n http_thread.daemon = True\n http_thread.start()\n return httpd.server_address", "def serve_sphinx():\n print \"Open a Web browser to http://127.0.0.1:8000/\\n\"\n os.system('cd html && python -m SimpleHTTPServer')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a file from the assets directory
def read_asset_file(self, file_name): this_dir = os.path.dirname(os.path.realpath(__file__)) asset_file = os.path.join(this_dir, 'assets', file_name) if not os.path.exists(asset_file): raise Exception('The asset file \'{0}\' does not exist in {1}'.format(file_name, this_dir)) return open(asset_file).read()
[ "def get_contents(filename):\n return resources.GetResource(os.path.join(_ASSETS_DIR, filename))", "def read_asset_file(self, asset_file):\n assets_data = {}\n execfile(asset_file, assets_data)\n return assets_data", "def _read_file(self):\n with open(self.file, 'r') as the_file:\n content = the_file.read()\n return content", "def get_asset(self, name):\n\t\tfilepath = os.path.join(self._env.get_assets_dir(), name)\n\t\tif not os.path.exists(filepath):\n\t\t\treturn None\n\t\treturn Asset(filepath)", "def readFile(self, mode = 'r'):\n\n # Read import file\n try:\n file = open(self.path, mode = mode)\n fileContent = file.read()\n file.close()\n\n return fileContent\n except Exception as e:\n print(\"The file `%s` could not be opened.\" % (self.path));\n print(e)\n sys.exit()", "def read(out, path):\n with open(os.path.join(out, path), \"r\") as f:\n return f.read()", "def asset(path: str) -> str:\n return os.path.join(os.path.dirname(__file__), \"..\", \"inputs\", path)", "def read_file(file_path):\n try:\n with open(file_path, \"r\") as iFile:\n content = iFile.read()\n return content\n except:\n return None", "def read_file(file_path):\n f = open(file_path, 'r')\n txt = f.read()\n f.close()\n return txt", "def read(from_file):\n if isinstance(from_file, file):\n return from_file.read()\n else:\n with open(from_file, 'r') as f:\n contents = f.read()\n f.close()\n return contents", "def file_read(filename):\n fobj = open(filename,'r');\n source = fobj.read();\n fobj.close()\n return source", "def load_asset(self, asset: str) -> list:\n path = f\"speed/assets/{asset}.txt\"\n new_list = []\n\n with open(path) as data:\n next(data)\n for line in data:\n new_list.append(line)\n\n return new_list", "def read_file(input_file):\n if isinstance(input_file, file):\n return input_file.read()\n else:\n with open(input_file, 'r') as f:\n contents = f.read()\n f.close()\n return contents", "def load_file(self, file_path):\n ...", "def get(self, tag):\n \n try:\n handle = self._assets[tag]\n except KeyError:\n raise AssetException( \"Unkown asset tag: \"+tag) \n \n if handle.asset is None:\n try:\n loader = self._loaders[handle.type]\n except KeyError:\n raise AssetException( \"Unrecognised type: \" \n + handle.type)\n handle.asset = loader.load( handle.filepath)\n return handle.asset", "def __read_file(self, path: str, binary: bool):\n mode = \"rb\" if binary else \"r\"\n encoding = None if binary else \"utf-8\"\n\n with self.__lock:\n if not os.path.isfile(path):\n return None\n\n with open(path, mode, encoding=encoding) as data_file:\n if binary:\n return data_file.read()\n\n return json.load(data_file)", "def open_and_read_file(file_path):\n\n text_file = open(file_path)\n text = text_file.read()\n\n return text", "def render_file(context, path):\n env = Environment(loader=FileSystemLoader('www/assets'))\n template = env.get_template(path)\n return template.render(**context)", "def contents_of_path(path):\n with open(path) as file:\n return file.read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a value to the HTTP session
def add_session(self, key, value): global http_session if not session_disabled: http_session[key] = value print('Add to session: {0}={1}'.format(key, value))
[ "def session(self, value):\n self.AUTH_ARGS[\"session\"] = value\n self._SESSION_DT = datetime.datetime.utcnow() if value else None", "def add_message_to_session(request, message):\n i = 0\n\n if 'messages' in request.session:\n while str(i) in request.session['messages']:\n i += 1\n else:\n request.session['messages'] = dict()\n\n request.session.modified = True\n request.session['messages'][i] = message\n return request", "def __set_session_var(var_key, var_value):\n SessionHandler.__session_obj[var_key] = var_value", "def add_session(self, session):\n self.session_list.add(session)", "def _set_next_variable_to_session_if_found():\n\n if request.method == \"GET\" and request.args.get(\"next\"):\n Session.add(\"next\", session_value=request.args.get(\"next\"))", "def add_object_to_session(object, session):\n if session and object:\n session.add(object)", "def update_session(request, user):\n request.session['user'] = user.dict", "def add(self, session_cookie: str, session):\n # TODO: maybe store session ids instead of a whole requests session\n logger.debug(\"Adding session for {}\", session_cookie)\n self._sessions[session_cookie] = session", "def add(self, session):\n uuid = session.uuid\n timestamp = time.mktime(session.timestamp.timetuple())\n pickled_session = sqlite3.Binary(pickle.dumps(session, -1))\n\n query = \"INSERT INTO sessions VALUES (?, ?, ?);\"\n params = (uuid, timestamp, pickled_session)\n\n c = self.conn.cursor()\n c.execute(query, params)", "def add_variable(name, value):\n\n environ[name] = value", "def antispam_inc(request):\n if settings.ANTISPAM_SESSION in request.session:\n request.session[settings.ANTISPAM_SESSION] += 1\n else:\n request.session[settings.ANTISPAM_SESSION] = 1\n\n request.session.save()", "def store(self, key, value):\n self._store.session[key] = value\n self.commit()", "def append_cookie(self, value):\n if 'HTTP_COOKIE' in self.environ and self.environ['HTTP_COOKIE']:\n self.environ['HTTP_COOKIE'] += ';{}'.format(value)\n else:\n self.environ['HTTP_COOKIE'] = '{}'.format(value)", "def set_user_session_datum(self, user_id, key, value):\n logger = logging.getLogger(\"UserSessionManager.set_user_session_datum\")\n logger.debug(\"Entry. user_id: %s, key: %s, value: %s\" % (user_id, key, value)) \n assert(self.is_user_authorized(user_id))\n self.r.hset(user_id, key, value)", "def add(self, value: 'State') -> None:\n self._content.append(value)", "def add_to_session(self,word):\r\n self._words.insert(word)", "def add_session_to_request(request):\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()", "def add(self, product, qty):\n product_id = product.id ## save product_id in product.id\n\n ## the self.basket wich can acces the info and the basket can have informacion about the users session\n if product_id not in self.basket: ## if product_iud is not in the basket\n self.basket[product_id] = {'price': str(product.price), 'qty': int(qty)} ## is no session id exist it will create session\n\n self.session.modified = True ## telling django that we have modified the session", "def add_message(request, kind, message):\r\n messages = request.session.get('messages', [])\r\n messages.append((kind, message))\r\n request.session['messages'] = messages" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output a JSON error message to the response stream
def output_error_json(self, message): error = { 'result': 'error', 'error': [message] } self.write(json.dumps(error))
[ "def send_json_error(err, code):\n msg = str(err).split(': ')[1]\n context = {'error': msg}\n return make_response(jsonify(**context), code)", "def render_JSON_Error(message, data={}):\n res = {\n 'status': 'Error',\n 'err': message,\n }\n res.update(data)\n return HttpResponse(json.dumps(res))", "def json_err(msg: str) -> Any:\n return jsonify({\"success\": False, \"error\": msg})", "def print_error(error):\n print json.dumps({'error': error})", "def jsonify_error(status, message, traceback, version): \\\r\n # pylint: disable=unused-argument\r\n\r\n cherrypy.response.headers['Content-Type'] = 'application/json'\r\n response_body = json.dumps(\r\n {\r\n 'error': {\r\n 'http_status': status,\r\n 'message': message,\r\n }\r\n })\r\n\r\n cherrypy.response.status = status\r\n\r\n return response_body", "def error(msg: str):\n return json.dumps({\"error\": msg})", "def jsonify_error(status: str, message: str, **traceback: dict) -> str:\n # Take the response generation of cherrypy in case of error\n response = cherrypy.response\n\n # Add the JSON Header\n response.headers[\"Content-Type\"] = \"application/json\"\n\n # Return the JSON with all the information\n return json.dumps(\n {\n \"status\": \"Failure\",\n \"status_details\": {\"message\": status, \"description\": message},\n }\n )", "def _create_rest_error_output(error_message, error_code):\r\n response = {\r\n \"success\": \"false\",\r\n \"data\": {},\r\n \"error\": {\r\n \"code\": error_code,\r\n \"message\": error_message\r\n }\r\n }\r\n return response", "def json_error_context(self, msg):\n self.status_code = 500\n return {'error': msg}", "def send_error_response(self, text) -> None:\n self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': text})", "def output_json(errors: List[Error], stream: TextIO) -> None:\n json.dump(obj=[err.as_mapping() for err in errors], fp=stream, indent=2)", "def error(self, msg):\n self.send_response(400, msg)\n self.end_headers()\n self.wfile.write(msg)", "def handle_custom_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def _render_error_response(self, code, title, message):\n\n if self._response_format == 'py':\n response = {'status': 'error',\n 'code': code,\n 'title': title,\n 'message': message}\n elif self._response_format == 'json':\n response = '{\"status\": \"error\", ' \\\n '\"code\": \"%s\", ' \\\n '\"title\": \"%s\", ' \\\n '\"message\": \"%s\"}' % (code, title, message)\n elif self._response_format == 'xml':\n response = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n' \\\n '<response>\\n' \\\n ' <status>error</status>\\n' \\\n ' <code>%s</code>\\n' \\\n ' <title>%s</title>\\n' \\\n ' <message>%s</message>\\n' \\\n '</response>' % (code, title, message)\n else:\n response = 'status: error\\n' \\\n 'code: %s\\n' \\\n 'title: %s\\n' \\\n 'message: %s' % (code, title, message)\n\n return response", "def server_error(error):\n return make_response(jsonify({\"message\": \"Internall error\"}),500)", "def print_error(response):\n print 'Status code: {0}'.format(response.status_code)", "def _build_error_response(message, status_code, error_id, **kwargs):\n\n return make_response(\n jsonify({\n \"status_code\": status_code,\n \"error\": {\n \"message\": message,\n \"id\": error_id\n },\n **kwargs\n }), status_code\n )", "def http_return_exception(exc_info=None, stream=sys.stdout):\n\n if exc_info == None:\n exc_info = sys.exc_info()\n\n stream.write(\"Content-type: text/html\\n\\n\");\n 
stream.write(format_exception(exc_info))", "async def error_to_json(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n res.json = exc.as_json()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking parameter list of numbers, if list is empty raise ValueError
def if_list_of_numbers_empty(self, list_of_numbers): if len(list_of_numbers) != 0: return list_of_numbers else: raise ValueError('List of numbers is empty')
[ "def validate_integers(*nums):\n for num in nums:\n if not isinstance(num, int):\n raise TypeError(\"Sorry. The function only works with integers.\")", "def _checkInputList(self, inputValue, defaultValue, inputValName, headerValName):\n checkData = False\n finalVals = []\n multVal = False\n if len(inputValue) != 0:\n try:\n if headerValName in inputValue[2]:\n finalVals = inputValue[7:]\n checkData = True\n self.__headerIncl = True\n self.__headerStr = inputValue[0:7]\n except:\n pass\n if checkData is False:\n for item in inputValue:\n try:\n finalVals.append(float(item))\n checkData = True\n except:\n checkData = False\n if len(finalVals) > 1:\n multVal = True\n if checkData is False:\n raise Exception(inputValName + \" input is not of a valid input type.\")\n else:\n checkData = True\n finalVals = defaultValue\n if len(finalVals) > 1:\n multVal = True\n\n return checkData, finalVals, multVal", "def test_bad_lists(self):\n input_numbers_list = [\"0a\", 1, 2, 3, 4, 8]\n self.function_verify_bad_input(input_numbers_list)\n\n input_numbers_list = [\"0\", 1, \"2._\", 3, 4, 8]\n self.function_verify_bad_input(input_numbers_list)\n\n input_numbers_list = 4\n self.function_verify_bad_input(input_numbers_list)\n\n input_numbers_list = \"5.6\"\n self.function_verify_bad_input(input_numbers_list)", "def _validate_list(values: Union[List[int], List[float], List[str]],\n allowed_types: List[Type[Any]], name: str) -> None:\n if not values:\n raise ValueError('{}: values list is empty.'.format(name))\n\n if not isinstance(values, list):\n raise TypeError('{}: values are in a {} but expected a list.'.format(\n name, type(values)))\n\n value_type = type(values[0])\n if value_type not in allowed_types:\n raise TypeError(\n '{}: values are expected to be one of {} but are {}.'.format(\n name, allowed_types, value_type))\n if not all(isinstance(value, value_type) for value in values):\n raise TypeError(\n '{}: all value types are expected to be {} but are not.'.format(\n name, value_type))", "def test_init_params_list_bad(self):\n with self.assertRaises(ValueError):\n insightiq_api.Parameters(['one', 1, 'two', 2])", "def _is_valid_value(value: List) -> bool:\n return isinstance(value, list)", "def check_type_p(p: Union[int, float, list, np.ndarray]) -> None:\n\n if isinstance(p, list):\n if any(not (isinstance(pi, (int, float)) and 0 <= pi <= 1) for pi in p):\n raise AssertionError(\"Some element in p is inappropriate (either not int nor float, or not in [0,1].\")\n\n return None", "def _validate(x):\n if not isinstance(x, int):\n raise TypeError(\"Only Integer Arrays are allowed\")", "def __validate(self):\n return all(isinstance(x, int) for x in self.spectrum)", "def _check_argument_list(runs, func):\n first = runs[0]\n is_consistent = False\n is_loosely_consistent = False\n\n sig = signature(func).parameters.values()\n \n if isinstance(first, np.ndarray):\n is_consistent = all([x.shape == first.shape for x in runs])\n if not is_consistent:\n raise MalformedArgListError('Inconsistent shapes of ndarrays')\n else:\n is_consistent = all([isinstance(x, type(first)) for x in runs])\n if is_consistent:\n if isinstance(first, list) or isinstance(first, tuple) or isinstance(first, dict):\n try: \n assert all([len(x) == len(first) for x in runs])\n except AssertionError:\n is_consistent = False\n is_loosely_consistent = True\n \n # TODO: check is_loosely_consistent. 
Maybe we can leave this to the function.\n \n return is_consistent or is_loosely_consistent", "def integers_only(lst):\n try:\n _ = [int(i) for i in lst]\n except:\n return False\n return True", "def _check_param_one_of(self, param: str, options: Iterable[Any]) -> None:\n value = getattr(self, param)\n if value not in options:\n *most, last = options\n option_str = \", \".join(f\"{x!r}\" for x in most[:-1]) + f\" or {last!r}\"\n err = \" \".join([\n f\"The `{param}` parameter for `{self.__class__.__name__}` must be\",\n f\"one of {option_str}; not {value!r}.\",\n ])\n raise ValueError(err)", "def test_empty_list(self):\n empty = []\n self.assertEqual(max_integer(empty), None)", "def _check_param(in_params, req_param, opt_param=list()):\n for param in req_param:\n if param not in in_params:\n raise ValueError('{} parameter is required'.format(param))\n defined_param = set(req_param+opt_param)\n for param in in_params:\n if param not in defined_param:\n logging.warning(\"Received unexpected parameter {}\".format(param))", "def _valid_comm_params(params):\n #if params is not a tuple, invalid params\n if not isinstance(params, tuple):\n return False\n #if params is not a 2-tuple, invalid params\n if len(params) != 2:\n return False\n\n log, time_table = params\n\n\n #log must be a list of events\n if not isinstance(log, list):\n return False\n #if log is a list but something in it is not an event\n for e in log:\n if not isinstance(e, Event):\n return False\n\n #if time_table is not a list, invalid params\n if not isinstance(time_table, list):\n return False\n\n #if some row is not a list, invalid params\n for row in time_table:\n if not isinstance(row, list):\n return False\n\n #if there's an entry in the time table that's not an integer >= 0, invalid\n for row in time_table:\n for entry in row:\n if not isinstance(entry, int):\n return False\n if entry < 0:\n return False\n\n return True", "def validator(data_list):\n all_valid = True # assume all elements is not None\n\n for value in data_list: # iterate through each element and\n if value is None: # check if it is None\n all_valid = False\n\n return all_valid # return the boolean results", "def check_type_x(x: Union[int, float, list, np.ndarray]) -> None:\n\n if isinstance(x, list):\n if any(not isinstance(xi, (int, float)) for xi in x):\n raise AssertionError(\"Some element of x is neither int nor float.\")\n\n return None", "def all_items_are_numeric(list_of_items):\n for elem in list_of_items:\n if not isinstance(elem, Number):\n return False\n return True", "def is_valid_shape(value):\n if is_int_positive(value):\n return True, value\n elif isinstance(value, tuple) or isinstance(value, list):\n for v in value:\n if not is_int_positive(v):\n return False, value\n return True, value\n else:\n return False, value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes a list of integers and returns it without positive numbers
def remove_positives(self, list_of_numbers): self.if_list_of_numbers_empty(list_of_numbers) remove_positives_list = [] for i in list_of_numbers: if i < 0: remove_positives_list.append(i) return remove_positives_list
[ "def remove_from_list_all_negative_numbers(in_list) -> list:\n i = 0\n while i < len(in_list):\n if in_list[i] < 0:\n del in_list[i]\n else:\n i = i + 1\n return in_list", "def afisareNumereNegativeNenule(lst):\n rezultat = []\n for i in lst:\n if i < 0:\n rezultat.append(i)\n return rezultat", "def nonneg(s):\n return filter(lambda x: x>=0, s)", "def suppr0(liste):\r\n return [n for n in liste if n!=0]", "def neg(x):\r\n return -min_elemwise(x, 0)", "def smallest_positive(list_of_ints):\n import pytest\n\n return min([i for i in list_of_ints if i > 0], default=0)", "def neg_sum(lst):\n mysum = 0\n for i in lst:\n if i < 0:\n mysum += i\n return mysum", "def nonz(self, arr: list):\n for i in range(len(arr)):\n if arr[i] == 0:\n continue\n else:\n return arr[i]", "def at_least_one_negative(lst):\n if not any(item < 0 for item in lst):\n lst[random.randint(0, len(lst) - 1)] *= -1\n return lst", "def remove_tilde(a_number_list:[int])->[int]:\r\n new_list = []\r\n for number in a_number_list:\r\n if number != 126:\r\n new_list.append(number)\r\n return new_list", "def clean_data(data):\n return [x for x in data if x > 0]", "def filter_positive_even_numbers(numbers):\n\n positive_even_numbers = [x for x in numbers if x > 0 and not x % 2]\n\n return positive_even_numbers", "def positive_places(f, xs):\n l = []\n for x in xs:\n if f(x) > 0:\n l.append(x)\n return l", "def nonzero_values(x):\n return x[x != 0]", "def trim_zeros(l):\n for j in range(len(l)):\n if l[0] == 0:\n del l[0]\n else:\n break\n for j in range(len(l)):\n if l[-1] == 0:\n del l[-1]\n else:\n break\n return l", "def find_missing_integer(lst):\n try:\n return sorted(set(range(lst[0], lst[-1])) - set(lst))[0]\n except:\n return max(lst) + 1", "def find_first_missing_positive(nums):\n\n i = 0\n n = len(nums)\n while i < n:\n j = nums[i] - 1\n # ignore any numbers <0 or larger than n.\n if 0 < nums[i] <= n and nums[i] != nums[j]:\n nums[i], nums[j] = nums[j], nums[i]\n else:\n i += 1\n # return the number not in it's index\n for i in range(n):\n if i + 1 != nums[i]:\n return i + 1\n # if [] or [1]\n return len(nums)+1", "def remove_zeros(input_data, minimum=0.002):\n output = []\n\n for d in input_data:\n if d[1] > minimum:\n output.append(d)\n\n return output", "def possibility_finder(row):\n num_list = [1,2,3,4,5,6,7,8,9]\n for i in row:\n if i != 0:\n num_list.remove(i)\n return num_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes a list of dates (integers) and removes those that are not 'leap years'
def filter_leaps(self, list_of_numbers): self.if_list_of_numbers_empty(list_of_numbers) leap_years_list = [] for i in list_of_numbers: if (i % 4 == 0) and (i % 100 != 0) or (i % 400 == 0): leap_years_list.append(i) return leap_years_list
[ "def find_leap_years(year):", "def leap_years(y1, y2):\r\n leaps = []\r\n takeouts = []\r\n for y in range(y1, y2, 1):\r\n if y % 4 == 0:\r\n leaps.append(True)\r\n if y % 400 != 0 and y % 100 == 0:\r\n takeouts.append(True)\r\n else:\r\n leaps.append(False)\r\n return leaps.count(True) - takeouts.count(True)", "def generate_selected_dates(year_from=2000, year_to=2020, doy_start=1, doy_end=-1):\n import calendar, time\n dates = []\n for year in range(year_from, year_to+1):\n if doy_end == -1:\n if calendar.isleap(year):\n end_day = 367\n else:\n end_day = 366\n else:\n end_day = doy_end\n dates_this_yr = [time.strftime(\"%Y.%m.%d\", time.strptime(\"%d/%d\" % (i, year),\n \"%j/%Y\")) for i in\n range(doy_start, end_day)]\n dates.extend(dates_this_yr)\n return dates", "def _keep_trading_days(self, dates):\n dates = [date for date in dates\n if date not in self.us_holidays and # remove holidays\n date.isoweekday() in range(1, 6)] # remove weekends\n return dates", "def test_is_leap_year(start_year, end_year):\n for i in range(start_year, end_year + 1):\n if i % 4 == 0:\n print(i)", "def get_acceptable_dates(date, margin):\n dates = [(date + timedelta(days=x)) for x in range(-margin, +margin + 1)]\n dates.sort()\n return dates", "def find_years(text):\r\n event_years = []\r\n tagged = nlp(text)\r\n ne = list(tagged.ents)\r\n dates = [entity.text for entity in ne if entity.label_ == 'DATE']\r\n current_year = datetime.datetime.now().year\r\n\r\n for date in dates:\r\n date_tagged = nlp(date)\r\n for word in date_tagged:\r\n if word.pos_ == 'NUM':\r\n try:\r\n year = parser.parse(word.text).year\r\n if year < current_year:\r\n event_years.append(str(year))\r\n elif year == current_year and str(current_year) in word.text:\r\n # Needed due to problems with small numbers that are not years\r\n event_years.append(str(year))\r\n except Exception as e:\r\n continue\r\n return event_years", "def calculateLyftDate(list):\n\n if len(list) is 0:\n return []\n\n dateList = []\n\n for date in list:\n dateItem = datetime.strptime(date, '%m-%d-%Y')\n dateList.append(dateItem.strftime('%Y-%m-%d'))\n\n return dateList", "def inspected_after_2018(l):\n for e in l:\n e = datetime.strptime(e[:10], '%Y-%m-%d')\n if e > datetime(year = 2018, month = 4, day = 1):\n return True\n return False", "def filter_date_in_range(phase_dates, starttime, endtime):\n phases = copy.deepcopy(phase_dates)\n for item in phases:\n if not (item[0] >= starttime and item[0] < endtime):\n item[0] = None\n if not (item[1] > starttime and item[1] <= endtime):\n item[1] = None\n\n new_phases = [\n item for item in phases if not (item[0] is None and item[1] is None)\n ]\n new_phases[0][0] = starttime\n new_phases[-1][1] = endtime\n return new_phases", "def adjust_exdates(self, rrules, exdate):\n def date_key(ex):\n if isinstance(ex, datetime.datetime):\n return ex\n elif isinstance(ex, list):\n if ex[1] is not None:\n return datetime.datetime(ex[1], ex[0], 1)\n elif (self.dtstart and ex[0] < self.dtstart.month) or ex[0] < self.now_date.month:\n return datetime.datetime(self.now_date.year+1, ex[0], 1)\n else:\n return datetime.datetime(self.now_date.year, ex[0], 1)\n else: # date\n return datetime.datetime(ex.year, ex.month, ex.day)\n\n exdate.sort(key=date_key)\n needs_time = False\n for ex in exdate:\n if not isinstance(ex, datetime.datetime):\n needs_time = True\n break\n if needs_time:\n new_exdate = []\n try:\n from dateutil.rrule import rrulestr\n rs = rrulestr(rrules, dtstart=self.now_date)\n ndx = 0\n for r in rs:\n while True:\n ex 
= exdate[ndx]\n if isinstance(ex, datetime.datetime):\n if r == ex:\n new_exdate.append(ex)\n if r >= ex:\n ndx += 1\n if ndx >= len(exdate):\n break\n continue # pragma nocover (see https://github.com/nedbat/coveragepy/issues/198)\n break\n elif isinstance(ex, list): # A month, with an optional year\n if r.month == ex[0] and (ex[1] is None or r.year == ex[1]):\n ex[1] = r.year # Claim the year\n new_exdate.append(r)\n if ex[1] is not None and (r.year > ex[1] or (r.year == ex[1] and r.month > ex[0])):\n ndx += 1\n if ndx >= len(exdate):\n break\n continue # pragma nocover\n break\n else: # A date\n rd = r.date()\n if rd == ex:\n new_exdate.append(r)\n if rd > ex:\n ndx += 1\n if ndx >= len(exdate):\n break\n continue # pragma nocover\n break\n if ndx >= len(exdate):\n break\n exdate = new_exdate\n except Exception as e: # pragma nocover\n log.debug(f'adjust_exdates({rrules}, {exdate}): Exception {e}')\n result = [e.strftime('%Y%m%dT%H%M%S') for e in exdate]\n log.debug(f'adjust_exdates({rrules}, {exdate}) = {result}')\n return result", "def year_intervals (years_list):\r\n \r\n #years_list = [1995,1996, 2000,2001,2002,2003,2004]\r\n \r\n years_list = list(map(float, years_list))\r\n \r\n years_list = list(map(int, years_list))\r\n \r\n n = len(years_list)\r\n \r\n start_y = list()\r\n end_y = list()\r\n \r\n start_y.append(years_list[0])\r\n \r\n if n > 1:\r\n for i in range(n-1):\r\n if(years_list[i+1] - years_list[i]>1):\r\n start_y.append(years_list[i+1])\r\n end_y.append(years_list[i])\r\n \r\n end_y.append(years_list[n-1])\r\n \r\n interval_yy = list()\r\n \r\n for i in range(len(start_y)):\r\n\r\n if end_y[i] - start_y[i]> 0 :\r\n interval_yy.append(str(start_y[i]) + '-' + str(end_y[i]))\r\n else:\r\n interval_yy.append(str(start_y[i]))\r\n\r\n \r\n x = \",\".join(interval_yy)\r\n return(x)", "def is_leap(year):\n\treturn not year%400 or not (year%4 and year%100)", "def scrape_years(self) -> list:\r\n cars = self.__cars\r\n years = []\r\n for car in cars:\r\n try:\r\n year = (\r\n car.find(\"span\", attrs={\"title\": \"Date of manufacture\"})\r\n .text.strip()\r\n .split(\"-\")[0]\r\n )\r\n except:\r\n year = None\r\n years.append(year)\r\n self.__years = [*self.__years, *years]\r\n return years", "def is_leap_year(year):\n return year % 4 == 0 and year % 100 != 0 or year % 400 == 0", "def determine_year_range(data):\n dates = [\n datetime.strptime(a[\"date\"], \"%Y-%m-%d\")\n for a in [d.get(\"pubDate\", None) for d in data]\n if a is not None\n ]\n\n # I guess O(2N) is probably better than having to sort the list\n return (min(dates).year, max(dates).year)", "def get_all_years(self, ) -> 'iterable[datetime.datetime]':\n # define earliest and latest years of entries\n start_year = self.query_all_entries().order_by(\n JournalEntry.create_date).first()\n end_year = self.query_all_entries().order_by(\n JournalEntry.create_date.desc()).first()\n if start_year and end_year:\n for y in range(start_year.create_date.year,\n end_year.create_date.year + 1):\n # find any entry within this year but before next year\n found = self.query_all_entries().filter(\n JournalEntry.create_date >= datetime.datetime(\n y, 1, 1, 0, 0)).filter(\n JournalEntry.create_date < datetime.datetime(\n y + 1, 1, 1, 0, 0)).first()\n # only yield this year if has an entry\n if found:\n yield datetime.datetime(y, 1, 1, 0, 0)", "def remove_year_from_date(date):\n return float(str(date)[4:])", "def datetime_fixer(date_list):\n # Checks if object is a Pandas Series and converts it to a list if true\n if 
isinstance(date_list, pd.core.series.Series):\n date_list = list(date_list)\n\n nats_added = 0\n\n for i in range(len(date_list)):\n # If the date is not a datetime\n if not isinstance(date_list[i], datetime.datetime):\n # If this date is an int\n if isinstance(date_list[i], int):\n if date_list[i] > 1000:\n # Convert Excel style date to datetime\n date_list[i] = datetime.datetime(*xlrd.xldate_as_tuple(date_list[i], 0))\n else:\n date_list[i] = np.datetime64('NaT')\n nats_added += 1\n # If this date is a string\n elif isinstance(date_list[i], str):\n # Try to convert to datetime using this format\n try:\n date_list[i] = datetime.strptime(date_list[i], '%m/%d/%Y')\n # If error, replace with NaT\n except:\n date_list[i] = np.datetime64('NaT')\n nats_added += 1\n else:\n date_list[i] = np.datetime64('NaT')\n nats_added += 1\n\n print('{} NaT added to list'.format(nats_added))\n return date_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> running_line(LOREM_IPSUM, 11, 0) ' ' >>> running_line(LOREM_IPSUM, 11, 5) ' Lorem' >>> running_line(LOREM_IPSUM, 11, 11) 'Lorem ipsum' >>> running_line(LOREM_IPSUM, 11, 22) ' dolor sit ' >>> running_line(LOREM_IPSUM, 11, 127) 'aliqua. ' >>> running_line(LOREM_IPSUM, 11, 138) ' Lore'
def running_line(text, window_size, tick): return ''
[ "def returnMemopsLine(value):\n \n if value:\n \n wordString = value.replace(os.linesep,' ')\n wordString = wordString[:80]\n \n return wordString\n \n else:\n \n return value", "def __manage_lines(message: str, color: str, first_line_start: str, new_line_start: str):\n index = 0\n output = str()\n for line in message.split(\"\\n\"):\n if not index:\n # First line.\n output += f\"{first_line_start}{color}{line}\\n\"\n else:\n # Any other line.\n output += f\"{new_line_start}{color}{line}\\n\"\n index += 1\n return output", "def add_line_numbers(source: str) -> str:\n return \"\\n\".join(f\"{n: <4}{line}\" for (n, line) in enumerate(source.split(\"\\n\"), 1))", "def display_line(line):\n table = get_ttable(line)\n parts = line.split()\n try:\n ones = int(parts[6])\n steps = int(parts[7])\n print display_ttable(table), \"# \",ones, \"\", steps, \"\", long_to_eng_str(ones,1,3), \"\", long_to_eng_str(steps,1,3)\n except:\n print display_ttable(table)", "def from_line(line):\n m = SummaryLine.REGEX.match(line)\n if not m:\n return None\n return SummaryLine(\n ncalls = _ncalls(m.group(1))\n , tottime = m.group(2)\n , tottime_percall = m.group(3)\n , cumtime = m.group(4)\n , cumtime_percall = m.group(5)\n , function = m.group(6)\n )", "def _get_line_numbers(self):\n\n output = ''\n row, col = self._text.index('end').split('.')\n i = 0\n for i in range(1, int(row) - 1):\n output += str(i) + '\\n'\n\n return output + str(i + 1)", "def setup_line(line, indices, salt):\n if len(indices) <= 0:\n return line\n length_of_line = len(line)\n new_line = CONST.EMPTY_STRING\n start_index = 0\n for index in indices:\n new_line += line[start_index : index + salt] + CONST.NEW_LINE\n start_index = index + salt\n new_line += line[start_index:]\n return new_line.strip(CONST.NEW_LINE)", "def _prefix_line(self, str, lines):\n\n new_lines = \"\"\n\n line_list = lines.split(\"\\n\")\n\n # Added since an empty line seems to be added.\n del line_list[-1]\n\n for l in line_list:\n new_lines = new_lines + str + l + \"\\n\"\n\n return new_lines", "def line_namer(i):\n r = []\n if (((i + 1) // 2) + 1) < 10:\n r = [\"{} |\".format(((i + 1) // 2) + 1)]\n else:\n r = [\"{}|\".format(((i + 1) // 2) + 1)]\n return r", "def summary_line(name, passed, width=100):\n\n # Passed.\n if passed == True:\n state = \"OK\"\n\n # Skipped.\n elif passed == 'skip':\n state = \"Skipped\"\n\n # Failed.\n else:\n state = \"Failed\"\n\n # Dots.\n dots = ''\n for j in range(width - len(name) - len(state) - 6):\n dots += '.'\n\n # Write out the line.\n sys.stdout.write(\"%s %s [ %s ]\\n\" % (name, dots, state))", "def generate_number_lines(number_of_lines=6, start=0, end=20):\n lines = [r'\\documentclass[letterpaper]{article}',\n r'\\usepackage{geometry}',\n r'\\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=30mm}',\n r'\\usepackage{tikz}',\n r'\\usepackage{amsmath}',\n r'\\usetikzlibrary{arrows}',\n r'\\begin{document}',\n r'\\pagenumbering{gobble}',\n r'\\begin{LARGE}',\n r'']\n\n numbers = ','.join([str(x) for x in range(start, end + 1)])\n for _ in range(number_of_lines):\n lines.append(r'')\n lines.append(r'{\\Large $-$}')\n lines.append(r'\\begin{tikzpicture}')\n lines.append(r'\\draw[latex-latex, thick] ' + '({},0) -- ({},0) ;'.format(start - 1, end + 1))\n lines.append(r'\\foreach \\x in {' + numbers + '}')\n lines.append(r'\\draw[shift={(\\x,0)},color=black, thick] (0pt,3pt) -- (0pt,-3pt);')\n lines.append(r'\\foreach \\x in {' + numbers + '}')\n lines.append(r'\\draw[shift={(\\x,0)},color=black, thick] (0pt,0pt) 
-- (0pt,-3pt) node[below] ')\n lines.append(r'{\\textbf{\\x}};')\n lines.append(r'\\end{tikzpicture}')\n lines.append(r'{\\Large $+$}')\n lines.append(r'\\\\')\n lines.append(r'\\vspace*{50px}')\n lines.append(r'')\n\n lines.append(r'\\end{LARGE}')\n lines.append(r'\\end{document}')\n\n return '\\n'.join(lines)", "def lineHeight(scr, lineNode):\n if lineNode is None:\n return 0\n manyLines = (len(lineNode.value)+1)//scr.getmaxyx()[1]+1\n # above solution doesn't account for tabs\n return manyLines", "def _fill_line(line):\n # Length must be 164: TID, FGCID, IFX, IFY, 4 * 10 * (PFiPX, PFjPY, occupancy, speed)\n while len(line) < 164:\n line.append('')\n line.append('')\n line.append('')\n line.append('')\n return line", "def makeNewLineAdd(oldLine, myLen, distance):\n\n oldLine[1] = int(oldLine[1])\n oldLine[2] = int(oldLine[2])\n oldLine[6] = int(oldLine[6])\n oldLine[7] = int(oldLine[7])\n\n if oldLine[1] <= int(distance):\n oldLine[1] = 0\n oldLine[6] = 0\n else:\n oldLine[1] -= distance\n oldLine[6] -= distance\n\n if oldLine[2]+distance >= myLen:\n oldLine[2] = myLen-1\n oldLine[7] = myLen-1\n else:\n oldLine[2] += distance\n oldLine[7] += distance\n\n oldLine[9] = '1'\n oldLine[10] = str(oldLine[2]-oldLine[1])+','\n oldLine[11] = '0,'\n return(joiner(oldLine))", "def line(self, line):\n leader = line[:13]\n trailer = line[13:]\n\n decorator = (\n [\n termstyle.bold,\n termstyle.red if \"[ FAILED ]\" in line else termstyle.green,\n ]\n if \"[\" in leader\n else []\n )\n self.out(leader, decorator=decorator, end=\"\", verbose=1)\n self.out(trailer, verbose=1)", "def get_line_identifier(self):", "def iplogline(ipv4address, randomdatetime, randomNumOccurances):\n class IPLogLineFactory():\n \"\"\"Gives us one line of a logfile.\"\"\"\n def get(self):\n \"\"\"Returns a list object that is one line of a dummy logfile.\"\"\"\n return [ipv4address.get(), \\\n randomdatetime.get_iso(), \\\n randomNumOccurances.get()]\n return IPLogLineFactory()", "def l(line):\n\treturn line[:-1]", "def detector_start_line(self):\n return int(spice.gdpool('INS{}_FPUBIN_START_LINE'.format(self.ikid), 0, 1)[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instance method on LinkedList. Add a new node with value newVal immediately after node with value val.
def insert_after(self, val, newVal): current = self.head while current._next: if current.val == val: new_node = Node(newVal, current._next) current._next = new_node self._size += 1 return current = current._next
[ "def insert_after(self, val, newVal):\n current = self.head\n while current:\n if current.val == val:\n position = current._next\n current._next = Node(newVal)\n current._next._next = position\n self._size += 1\n break\n current = current._next", "def insert_before(self, val, new_val):\n current = self.head\n while current._next.val != val:\n current = current._next\n current._next = Node(new_val, current._next)", "def insert_after(self, val: any, new_val: any) -> bool:\n curr = self.head\n while curr:\n if curr.val == val:\n new_node = Node(new_val)\n new_node.next = curr.next\n curr.next = new_node\n return True\n else:\n curr = curr.next\n else:\n return False", "def insert_before(self, val, newVal):\n current = self.head\n previous = None\n while current:\n if current.val == val:\n if previous is None:\n self.insert(newVal)\n else:\n new_node = Node(newVal)\n new_node._next = current\n previous._next = new_node\n self._size += 1\n break\n previous = current\n current = current._next", "def add(self, val):\n if self._head is None: # Check if head node is none\n self._head = Node(val)\n\n else:\n self.helper_add(self._head, val)", "def add_to_tail(self, value):\n new_node = ListNode(value)\n self.length += 1\n if not self.tail and not self.head:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.prev = self.tail\n self.tail.next = new_node\n self.tail = new_node\n # self.length += 1", "def add(self, value):\n\n temp = self.head \n if(temp.value == None):\n self.head.value = value\n else:\n while(temp.next != None):\n temp = temp.next\n temp.next = Node(value)", "def append(self,value):\n node = Node(value)\n if self.head is None:\n self.head = node\n else:\n current = self.head\n while current.next is not None:\n current = current.next\n current.next = node", "def insert_before(self,value, new_val):\n \n new_val1 =Node(new_val)\n if self.head.value == value:\n new_val1.next = self.head\n self.head = new_val1\n else:\n curent = self.head\n while True:\n try:\n curent.next.value\n except:\n return 'Value Not Exist'\n else:\n if curent.next.value == value:\n old = curent.next\n new_val1.next = old\n curent.next = new_val1\n else:\n curent = curent.next\n continue\n break", "def add_to_head(self, value):\n # create a new node\n new_node = ListNode(value, None, None)\n self.length +=1\n # 1. add to empty\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n # 2. 
add to nonempty\n else:\n new_node.next = self.head\n self.head.prev = new_node\n self.head = new_node\n # update the lenght\n # self.length +=1", "def insert_ele(self, val):\n \n # First consider if the new node could be inserted as the list head.\n q = Node(val)\n if self.head == None or val < self.head.value:\n q.succ = self.head\n self.head = q\n return\n\n # Walk through the list until the next value is None or the next value is larger or equal to the one to be inserted\n p = self.head\n while p.succ != None and p.succ.value < val:\n p = p.succ\n q.succ = p.succ\n p.succ = q", "def insert_before(self, val: any, new_val: any) -> bool:\n curr = prev = self.head\n while curr:\n if curr.val == val:\n new_node = Node(new_val)\n if curr == self.head:\n self.head = new_node\n new_node.next = curr\n else:\n prev.next = new_node\n new_node.next = curr\n return True\n else:\n prev, curr = curr, curr.next\n else:\n return False", "def test_append_tail_reassign(dll):\n dll.append(6)\n assert dll.tail.prev.next is dll.tail", "def test_linked_list_push_moves_old_head_to_new_head_next():\n from linked_list import LinkedList\n l = LinkedList()\n l.push('val')\n l.push('val2')\n assert l.head.next.data == 'val'", "def add(self, value):\n\n my_element = Element(value)\n\n if self.last:\n self.last.next = my_element\n\n self.last = my_element\n self.head = self.head or my_element\n self.my_length += 1", "def addAtIndex(self, index, val):\n\n node = Nodes(val)\n curr = self.head\n indx = 0\n prev = None\n has_index = False\n while curr:\n if indx == index:\n has_index = True\n break\n prev = curr\n curr = curr.next\n indx += 1\n if has_index:\n prev.next = node\n node.next = curr", "def add_after_node(self, key, data):\n cur = self.head\n while cur:\n if cur.data == key:\n if cur.next is None:\n self.append(data)\n return\n new_node = Node(data)\n new_node.next = cur.next\n cur.next.prev = new_node\n cur.next = new_node\n new_node.prev = cur\n return\n else:\n cur = cur.next", "def add(self, item):\n temp = Node(item)\n temp.set_next(self.head)\n self.head = temp", "def sort_append(self, value):\n\t\tif self.head is None:\n\t\t\tself.head = Node(value)\n\t\t\treturn\n\t\t\n\t\tif value < self.head.value:\n\t\t\tnode = Node(value)\n\t\t\tnode.next = self.head\n\t\t\tself.head = node\n\t\t\treturn\n\t\t\n\t\tnode = self.head\n\t\twhile node.next is not None and value >= node.next.value:\n\t\t\tnode = node.next\n\t\t\t\n\t\tnew_node = Node(value)\n\t\tnew_node.next = node.next\n\t\tnode.next = new_node\n\t\t\n\t\treturn None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the UserOptions object from the supplied config file. If no filename is supplied, look in the default location (see importlinter.cli.lint_imports).
def read_user_options(config_filename: Optional[str] = None) -> UserOptions: readers = settings.USER_OPTION_READERS.values() if config_filename: if config_filename.endswith(".toml"): readers = [settings.USER_OPTION_READERS["toml"]] else: readers = [settings.USER_OPTION_READERS["ini"]] for reader in readers: options = reader.read_options(config_filename=config_filename) if options: normalized_options = _normalize_user_options(options) return normalized_options raise FileNotFoundError("Could not read any configuration.")
[ "def _read_config_file():\n json_file_path = os.path.join(os.path.dirname(__file__),\n 'users-settings.json')\n with open(json_file_path) as settings:\n return json.load(settings)", "def _load_user_config(self):\n config = RawConfigParser()\n config.add_section('copr-user')\n config.set('copr-user', 'ssh_key', '~/.ssh/id_rsa')\n\n copr_conf = os.path.expanduser(\"~/.config/copr\")\n if os.path.exists(copr_conf):\n config.read(copr_conf)\n config.set('copr-user', 'username', config.get('copr-cli', 'username'))\n\n tito_dir = os.path.join(find_git_root(), tito_config_dir())\n copr_local = os.path.join(tito_dir, \"copr_user.conf\")\n if os.path.exists(copr_local):\n config.read(copr_local)\n\n if not config.has_option('copr-user', 'username'):\n raise Exception(\"Can not load username from '~/.config/copr' and 'copr_user.conf'\")\n\n return config", "def __init__(self, filePath):\n ConfigParser.__init__(self)\n ConfigParser.readfp(self, open(filePath))\n\n self.userMap = {}\n if self.has_section('userMap'):\n for name in self.options('userMap'):\n self.userMap[name] = self.get('userMap', name)", "def load_cfg(self,filepath):\n config = configparser.ConfigParser()\n config.read([filepath])\n return config", "def parse_config_file(\n filename: str,\n) -> RBToolsConfig:\n try:\n config = _load_python_reviewboardrc(filename)\n except SyntaxError as e:\n raise ConfigSyntaxError(filename=filename,\n line=e.lineno,\n column=e.offset,\n details=str(e))\n\n return RBToolsConfig(\n filename=filename,\n config_dict={\n key: config[key]\n for key in set(config.keys()) - set(_builtins.keys())\n })", "def read(self):\n\n # Add options from config file.\n print self._config.get_all()\n for id, (val, type) in self._config.get_all().items():\n if type == 'src' and not self.check(id, val): # Don't use wrong paths\n log.warning(_('idg.options.not.valid.use.default') + id +\\\n \" \" + val)\n continue\n self._opts[id] = [val, type]\n\n dom = self._config.dom()\n if dom is None:\n log.error(_('idg.options.cant.parse.config.file') +\\\n self._config.path())\n return\n else:\n log.info(_('idg.options.using.config.file') + self._config.path())", "def _get_file(self, config):\n config_file = SafeConfigParser()\n config_file.read([os.path.expanduser('~/.scrapekit.ini')])\n if config_file.has_section('scrapekit'):\n config.update(dict(config_file.items('scrapekit')))\n if config_file.has_section(self.scraper.name):\n config.update(dict(config_file.items(self.scraper.name)))\n return config", "def load_config():\n\n parser = argparse.ArgumentParser()\n\n # Only one argument is expected\n parser.add_argument(\"--config_path\", required=True, type=str,\n default=None, help=\"Path to configuration JSON file\")\n args = parser.parse_args()\n\n config_path = args.config_path\n\n try:\n with open(config_path, \"r\") as fh:\n return json.load(fh)\n except OSError:\n sys.exit(\"Configuration file does not exist\")\n except json.JSONDecodeError:\n sys.exit(\"Configuration file is not a valid JSON\")", "def get_config(config_file=None):\n config = {}\n if os.path.exists(DEFAULT_CONFIG):\n with open(DEFAULT_CONFIG, 'r') as f:\n config.update(json.load(f))\n\n if not config_file:\n config_file = os.environ.get('MARVINBOT_CONFIG') or 'settings.json'\n\n if os.path.exists(config_file):\n with open(config_file, 'r') as f:\n config.update(json.load(f))\n\n else:\n raise ValueError('ConfigFile [{}] not found'.format(config_file))\n\n return config", "def config(self) -> ConfigParser:\n config = ConfigParser()\n 
config.read(self.path/\"config.cfg\")\n return config", "def get_settings_from_file(file_path):\n credentials = {}\n\n if os.path.exists(file_path):\n with open(file_path, 'r') as credentials_file:\n try:\n credentials = json.loads(credentials_file.read())\n except ValueError:\n print('oAuth config file contents cannot be parsed into json.')\n return None\n\n return credentials", "def _read_config():\n\n import configparser\n import os\n\n basepath = os.getcwd()\n prev = None\n while basepath != prev:\n prev = basepath\n path = os.path.join(basepath, 'uriconfig.ini')\n if os.path.exists(path):\n break\n basepath = os.path.split(basepath)[0]\n\n parser = configparser.ConfigParser()\n parser.read(path)\n return parser", "def read_config(config_file):\n\n config = SafeConfigParser(defaults=DEFAULTS)\n try:\n files_read = config.read(config_file)\n except MissingSectionHeaderError:\n raise Exception('Config file {} appears to be empty or misconfigured'.format(config_file))\n \n if config_file not in files_read:\n raise Exception('Config file {} not found'.format(config_file))\n\n return config", "def _read_oci_config(fname, profile='DEFAULT'):\n _logger.debug('%s', where_am_i())\n full_fname = os.path.expanduser(fname)\n try:\n oci_config = oci_sdk.config.from_file(full_fname, profile)\n return oci_config\n except oci_sdk.exceptions.ConfigFileNotFound as e:\n _logger.debug(\"Unable to read OCI config file: %s\", str(e))\n raise Exception('Unable to read OCI config file') from e", "def getUserConfigOptions( self ):\n return self._userSettings", "def readConfiguration (configurationFilePath = None):\n \n pathList = getConfigurationPaths(configurationFilePath)\n \n # if not silent:\n # if len(pathList) is 1:\n # print(\"Loading options from {:s}\".format(pathList[0]))\n # else:\n # print(\"Loading overrides from {:s}\".format(pathList[-1]))\n\n configuration = DEFAULTCONFIGURATION\n configuration['CONFIGDIR'] = os.path.dirname(pathList[0])\n for path in pathList:\n configuration = parseConfiguration(path, configuration)\n\n return(configuration)\n\n # TODO: Validate configuration", "def get_config(path_to_config: str = None) -> Config:\n\n path_to_config = os.path.join(\n os.path.dirname(__file__),\n \"..\",\n \"configuration.ini\"\n )\n\n cfg = configparser.ConfigParser()\n cfg.read(path_to_config)\n\n return Config(**cfg[\"main\"])", "def read_config(filename):\n with open(filename) as fobj:\n return json.load(fobj)", "def parse_options_file():\n options = {}\n if not os.path.exists(\"se_options\"):\n return {}\n\n with open(\"se_options\") as config_fin:\n for line in config_fin:\n line = line.strip()\n option_name = line.split(\"=\")[0]\n option_value = line.split(\"=\")[1]\n options[option_name] = option_value\n\n return options" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a string into a Python class.
def _string_to_class(string: str) -> Type: components = string.split(".") class_name = components[-1] module_name = ".".join(components[:-1]) module = importlib.import_module(module_name) cls = getattr(module, class_name) assert isinstance(cls, type) return cls
[ "def parse(cls, string):\n obj, i = cls.match(string, 0)\n if i != len(string):\n raise NotParseable(f\"Found unexpected {string[i]}.\", i + 1)\n return obj", "def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n this_class = None\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class", "def parse_class_name(string):\n\n class_parts = string.split(\".\")\n class_name = class_parts[-1]\n\n # TODO should we not assume that everything is from neuralmonkey?\n module_name = \".\".join([\"neuralmonkey\"] + class_parts[:-1])\n\n try:\n module = importlib.import_module(module_name)\n except ImportError as exc:\n # if the problem is really importing the module\n if exc.name == module_name:\n raise Exception((\"Interpretation '{}' as type name, module '{}' \"\n \"does not exist. Did you mean file './{}'? \\n{}\")\n .format(string, module_name, string, exc)) from None\n else:\n raise\n\n try:\n clazz = getattr(module, class_name)\n except AttributeError as exc:\n raise Exception((\"Interpretation '{}' as type name, class '{}' \"\n \"does not exist. Did you mean file './{}'? \\n{}\")\n .format(string, class_name, string, exc))\n return clazz", "def from_arguments(cls, argstring):\n\n obj = object.__new__(cls)\n obj.parse(argstring)\n return obj", "def from_str(cls, as_str):", "def parse(cls, input):", "def deserialize(string, cls):\n d = json.loads(string)\n d = _unwrap_dict(d, cls)\n obj = _dict_to_obj(d, cls)\n return obj", "def import_obj_from_str(s):\n ast_obj = ast.parse(s).body[0]\n return ast_type_to_import_type[type(ast_obj)](ast_obj)", "def get_class_from_string(class_name: str) -> Type[Any]:\n\n parts = class_name.split(\".\")\n module_name = \".\".join(parts[:-1])\n cls: Type[Any] = __import__(module_name)\n for comp in parts[1:]:\n cls = getattr(cls, comp)\n return cls", "def parse(cls, string: str):\n task = string.split(\"::\")\n if len(task) == 1:\n dataset = task[0]\n split = None\n elif len(task) == 2:\n dataset = task[0]\n split = task[1]\n else:\n raise ValueError(\"Received unexpected dataset specification.\")\n\n return Task(dataset, split)", "def parse(cls, string: str) -> Lipid:\n # TODO refactor, too much code repeated in the two if blocks\n\n # identify abbreviation type\n if '(' in string and ')' in string:\n string = string.strip()\n\n if not string.endswith(')'):\n raise TypeError(f\"Cannot parse abbreviation {string}\")\n\n lipid_class_name = string.split('(', 1)[0]\n # second part of split at first ( is residue string, add leading ( again!\n residue_string = '(' + string.split('(', 1)[1]\n\n lipidclass = LipidClass.parse(lipid_class_name)\n\n residuelist = ResidueList.parse(residue_string)\n\n lipid = cls(lipidclass, residuelist)\n lipid._input = string\n\n return lipid\n\n # CE 22:4;0\n elif ' ' in string:\n lipid_class_name, residue_string = string.split(' ', 1)\n\n lipidclass = LipidClass.parse(lipid_class_name)\n residuelist = ResidueList.parse(residue_string)\n\n lipid = cls(lipidclass, residuelist)\n lipid._input = string\n\n return lipid\n\n else:\n lipid = Lipid(LipidClass(string))\n lipid._input = string\n return lipid", "def from_string(cls, contents):\n lines = contents.split('\\n')\n keywords = 
cls._parse_keywords(lines[0:1])\n title = lines[1: 3]\n mol = cls._parse_molecule(lines[3:])\n d = {\"keywords\": keywords, \"title\": title, \"molecule\": mol.as_dict(),\n \"@module\": cls.__module__, \"@class\": cls.__name__}\n return MopTask.from_dict(d)", "def from_string(cls, string):\n words = string.split(' ')\n if len(words) < 3:\n raise ValueError('A move have to contain a minimum of a name and one position.')\n if len(words) % 2 != 1:\n raise ValueError('Expected one more integer')\n\n name = words[0]\n try:\n ints = map(int, words[1:])\n except ValueError as e:\n raise e\n couples = zip(ints[::2], ints[1::2])\n return cls(name, couples)", "def from_cif(cls, string: str):\n cif_data = Data()\n flag = cif_data.take_from_string(string)\n\n cif_items = cif_data.items\n cif_loops = cif_data.loops\n\n items = []\n flag = True\n n_mandatory = len(cls.CLASSES_MANDATORY)\n for i_cls, cls_ in enumerate(cls.CLASSES):\n flag = i_cls >= n_mandatory\n if issubclass(cls_, ItemN):\n prefix_cls = cls_.PREFIX\n if cif_items.is_prefix(prefix_cls):\n cif_items_prefix = cif_items[prefix_cls]\n cif_string = str(cif_items_prefix)\n obj_prefix = cls_.from_cif(cif_string)\n if obj_prefix is not None:\n items.append(obj_prefix)\n flag = True\n elif issubclass(cls_, LoopN):\n prefix_cls = cls_.ITEM_CLASS.PREFIX\n for cif_loop in cif_loops:\n if cif_loop.is_prefix(\"_\"+prefix_cls):\n cif_string = str(cif_loop)\n obj_prefix = cls_.from_cif(cif_string)\n if obj_prefix is not None:\n items.append(obj_prefix)\n flag = True\n if (not(flag)):\n warn(f\"Mandatory class: '{cls_.__name__:}' is not given.\",\n UserWarning)\n break\n\n if not(flag):\n return None\n\n data_name = cif_data.name\n obj = cls(data_name=data_name, items=items)\n obj.form_object()\n return obj", "def from_string(cls, source: str, filename: str):\n globals = {} # type: Dict[str, Any]\n source_ast = ast.parse(source, type_comments=True)\n code = compile(source_ast, filename, mode=\"exec\")\n exec(code, globals)\n return cls(source_ast, filename, globals)", "def deserialize(self, str):", "def fromstring(lex_str, include_semantics: bool = ...):\n ...", "def string_parser(string):\n\n # converts string into a list\n if ', ' in string:\n config = []\n # converts each item in the list into its respective types\n for item in string.split(', '):\n config.append(string_parser(item))\n return config\n # converts string to boolean\n elif string == 'True':\n return True\n elif string == 'False':\n return False\n # converts string to int\n elif string.count('.') == 0:\n try:\n return int(string)\n except ValueError:\n pass\n # converts string to float\n else:\n try:\n return float(string)\n except ValueError:\n pass\n\n # does not convert string if already is a string\n return string", "def fromstring(cls, s):\n lines = s.split(\"\\n\")\n nlines = len(lines)\n current_subroutine = None\n\n prg = Program_UnAssembled()\n\n print lines\n\n for iline in xrange(nlines):\n print iline + 1\n line = lines[iline]\n print line\n elts = line.split()\n\n if len(elts) < 1:\n # empty line\n continue\n\n # label\n if elts[0][-1] == ':':\n # first elt is a label -> start of a subroutine\n subroutine_name = elts[0][:-1]\n prg.subroutines[subroutine_name] = Subroutine()\n prg.subroutines_names.append(subroutine_name)\n current_subroutine = prg.subroutines[subroutine_name]\n elts = elts[1:]\n\n if len(elts) < 1:\n # empty label\n continue\n\n s = \" \".join(elts)\n\n instr = Instruction.fromstring(s)\n print \"INSTR = \", instr\n if instr is None:\n continue\n\n if 
current_subroutine is not None:\n current_subroutine.instructions.append(instr)\n else:\n prg.instructions.append(instr)\n\n if instr.opcode == Instruction.OP_ReturnFromSubroutine:\n current_subroutine = None\n\n return prg\n\n\n # @classmethod\n # def fromxmlstring(cls, s):\n # \"\"\"\n # Create a new UnAssembledProgram from a XML string.\n # \"\"\"\n # pass", "def from_string(cls, value):\n if rangedNumber.looks_like_rangedNumber(value):\n parts = re.split(\"\\s*:\\s*\", value)\n lo = parts[0].lstrip(\"[ \")\n best = parts[1]\n hi = parts[2].rstrip(\"] \")\n # The regular instantiator will do the rest of the type checking.\n return cls(lo, best, hi)\n else:\n return cls(None, value, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a boolean (or None) for the include_external_packages option in user_options.
def _get_include_external_packages(user_options: UserOptions) -> Optional[bool]: try: include_external_packages_str = user_options.session_options["include_external_packages"] except KeyError: return None # Cast the string to a boolean. return include_external_packages_str in ("True", "true")
[ "def verify_use_incredibuild(ctx, option_name, value):\t\t\n\tif not _is_user_option_true(value):\n\t\treturn (True,\"\",\"\")\t\n\t(res, warning, error) = _verify_incredibuild_licence('Make && Build Tools Extension Package', 'All Platforms')\t\n\treturn (res, warning, error)", "def toolHasOptions():\n pass", "def verify_use_incredibuild_win(ctx, option_name, value):\t\n\tif not _is_user_option_true(value):\n\t\treturn (True,\"\",\"\")\t\n\t(res, warning, error) = _verify_incredibuild_licence('Make && Build Tools Extension Package', 'Windows')\t\n\treturn (res, warning, error)", "def HasGlobalOption(self, option):\n\n return option in map(lambda x: x[0], self.__global_options)", "def is_third_party(self) -> bool:\n return any(\n self.source.startswith(third_party_import_string)\n for third_party_import_string in self.third_party_import_strings\n )", "def req_uses_extra(req: pkg_resources.Requirement, extra: Optional[str]) -> bool:\n if extra and not req.marker:\n return False\n keep_req = True\n if req.marker:\n extras = {\"extra\": \"\"}\n if extra:\n extras = {\"extra\": extra}\n keep_req = req.marker.evaluate(extras)\n return keep_req", "def has_required_config(self):\n config = get_config()\n\n # the following options MUST be set by the user before FLACManager can\n # be used\n return (\n config[\"Organize\"].get(\"library_root\")\n and config[\"Gracenote\"].get(\"client_id\")\n and config[\"MusicBrainz\"].get(\"contact_url_or_email\")\n and config[\"MusicBrainz\"].get(\"libdiscid_location\")\n )", "def _should_use_importlib_metadata() -> bool:\n with contextlib.suppress(KeyError, ValueError):\n return bool(strtobool(os.environ[\"_PIP_USE_IMPORTLIB_METADATA\"]))\n if sys.version_info < (3, 11):\n return False\n import importlib.metadata\n\n return bool(getattr(importlib.metadata, \"_PIP_USE_IMPORTLIB_METADATA\", True))", "def _is_extended_application_package(self, package_name):\n\n return package_name in self._extended_application_components", "def auto_install(self):\n value = os.environ.get('PIP_ACCEL_AUTO_INSTALL')\n return coerce_boolean(value) if value else None", "def is_installed(self, packages):\n return False", "def _is_node_option_set(resource: Dict) -> bool:\n try:\n node_options = resource[\"Properties\"][\"Environment\"][\"Variables\"][\"NODE_OPTIONS\"]\n\n return \"--enable-source-maps\" in node_options.split()\n except (KeyError, AttributeError):\n return False", "def is_local_user_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"is_local_user_enabled\")", "def get_data_file_option(par_contents, opt_text):\n opt_found = False\n if opt_text in par_contents['main']:\n opt_str = par_contents['main'][opt_text].upper()\n opt_found = mlp_utils.is_option_value_true(opt_str)\n return opt_found", "def user_globals(context):\n scn = context.scene\n vars = context.preferences.addons[__name__].preferences.global_variable_coll\n if any(ext in scn.render.filepath for ext in vars.keys()):\n return True\n if scn.use_nodes and len(scn.node_tree.nodes) > 0:\n tree = scn.node_tree\n nodes = (n for n in tree.nodes if n.type=='OUTPUT_FILE')\n for node in nodes:\n if any(ext in node.base_path for ext in vars.keys()):\n return True\n if \"LAYER\" in node.format.file_format:\n for slot in node.layer_slots:\n if any(ext in slot.name for ext in vars.keys()):\n return True\n else:\n for slot in node.file_slots:\n if any(ext in slot.path for ext in vars.keys()):\n return True\n return False", "def _is_other_application_package(self, package_name):\n\n return package_name in 
self._other_application_components", "def vendor_options(self) -> Optional[pulumi.Input['VolumeAttachVendorOptionsArgs']]:\n return pulumi.get(self, \"vendor_options\")", "def use_system_library(library):\n return (\n get_distutils_build_or_install_option('use_system_{0}'.format(library)) or\n get_distutils_build_or_install_option('use_system_libraries'))", "def _has_required_package():\n\n packages_ok = True\n\n # Check tensorflow with a recent version is installed.\n try:\n # pylint: disable=g-import-not-at-top\n import tensorflow as tf\n # pylint: enable=g-import-not-at-top\n except ImportError:\n eprint('Cannot import Tensorflow. Please verify '\n '\"python -c \\'import tensorflow\\'\" works.')\n packages_ok = False\n try:\n if tf.__version__ < '0.10.0':\n eprint('Tensorflow version must be at least 0.10.0. ',\n VERIFY_TENSORFLOW_VERSION)\n packages_ok = False\n except (NameError, AttributeError) as e:\n eprint('Error while getting the installed TensorFlow version: ', e,\n '\\n', VERIFY_TENSORFLOW_VERSION)\n packages_ok = False\n\n # Check cloud ml sdk with a recent version is installed.\n try:\n # pylint: disable=g-import-not-at-top\n import google.cloud.ml as cloudml\n # pylint: enable=g-import-not-at-top\n except ImportError:\n eprint('Cannot import google.cloud.ml. Please verify '\n '\"python -c \\'import google.cloud.ml\\'\" works.')\n packages_ok = False\n try:\n if cloudml.__version__ < '0.1.7':\n eprint('Cloudml SDK version must be at least 0.1.7 '\n 'to run local prediction. ', VERIFY_CLOUDML_VERSION)\n packages_ok = False\n except (NameError, AttributeError) as e:\n eprint('Error while getting the installed Cloudml SDK version: ', e,\n '\\n', VERIFY_CLOUDML_VERSION)\n packages_ok = False\n\n return packages_ok", "def _is_custom_package(self, package_name):\n\n return self._is_custom_component(package_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a boolean (or None) for the show_timings option in user_options.
def _get_show_timings(user_options: UserOptions) -> bool: try: show_timings_str = user_options.session_options["show_timings"] except KeyError: return False # Cast the string to a boolean. return show_timings_str in ("True", "true")
[ "def test_get_option_strikes_realtime(self):\n pass", "def test_get_option(self, debug_session, tdevice):\n debug_session.connect()\n\n result = debug_session.get_option(tdevice[\"option\"])\n assert result == False", "def _get_areTipsAndTricksShown(self) -> \"bool\" :\n return _core.GeneralPreferences__get_areTipsAndTricksShown(self)", "def show_help_options(options):\n ret = False\n if options['help_call']:\n show_help_call()\n ret = True\n if options['help_format']:\n show_help_format()\n ret = True\n return ret", "def should_show_query():\n return config.ALWAYS_SHOW is True", "def test_get_options_stats_realtime(self):\n pass", "def has_options(self):\n return self.options.has_options()", "def HasTIM(self):\n return self.__has('TIM')", "def has_option (self, long_option):\r\n return self.option_index.has_key(long_option)", "def enable_performance_insights(self) -> typing.Optional[bool]:\n return self._values.get('enable_performance_insights')", "def byass_time_point_status(self):\n return False", "def honor_timestamps(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"honor_timestamps\")", "def colourblind_options_on(self, request):\n\n try:\n settings = Settings.objects.get(user=request.user)\n return settings.colourblind_opts_on\n except Settings.DoesNotExist:\n return False", "def _is_an_option(option_value: Union[_TomlValue, Dict]) -> bool:\n if isinstance(option_value, dict):\n return \"add\" in option_value or \"remove\" in option_value\n return True", "def isTuning(self):\r\n return self['TS'] != b'0'", "def is_time(self) -> \"bool\":\n return self._value.getType() == Value.TVAL", "def test_get_option_expirations_realtime(self):\n pass", "def dev(self):\r\n try:\r\n dev = self.get('dev')\r\n if isinstance(dev, bool):\r\n return dev\r\n else:\r\n return False\r\n except KeyError, e:\r\n return False", "def _option_exists(self, option):\n for call in self.calls.values():\n for kwarg in call:\n if option == kwarg:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint for the test response
def test(): return "Test Response", 201
[ "def test_send_result(self):\n pass", "def test_ok_returned_ticket(self):\n process_result = process_response(self.resp_ok)\n self.assertEqual(process_result[\"detail\"], self.sample_ok)", "def test_get_responce(self):\n self.assertEqual(self.r.status_code, 200)", "def end_test(self):", "def test_ok_result(self):\n process_result = process_response(self.resp_ok)\n self.assertEqual(process_result[\"result\"], 0)", "def test_response_without_notifications(self):\n request = http.HttpRequest()\n response = http.HttpResponse()\n self.middleware.process_response(request, response)", "def test_status_request(self):\n pass", "def outputServerStub():\r\n data = request.form\r\n print(data)\r\n return jsonify(isError= False,\r\n message= \"Success\",\r\n statusCode= 200,\r\n data= data), 200", "def test_response_data(self):\n tester = app.test_client(self)\n response = tester.get(DUMMY_ROUTE)\n self.assertEqual(response.content_encoding,\"gzip\")", "def test(self):\n return self._request('GET', '/test')", "def test_mock_service_dynamic(self):\n log.info(\"Calling %s.\" % inspect.stack()[0].function)\n url = \"http://127.0.0.1:5000/anyendpoint\"\n response_code = \"202\"\n response_body = '{\"code\": 0, \"message\": \"all good\"}'\n headers = {\"Response-Code\": response_code, \"Response-Body\": response_body}\n resp = self.get(url, headers=headers)\n assert resp != None\n assert resp[\"code\"] == 0\n log.info(\"Test %s passed.\" % inspect.stack()[0].function)\n \"\"\" response\n 202 \n \n {\"code\": 0, \"message\": \"all good\"}\n \"\"\"", "def test_main(self, mock):\n\n mock.get(self.url, text='resp')\n main()", "def test_type_response():\n res = ResMsg()\n now = datetime.now()\n date = datetime.now().date()\n num = Decimal(11.11)\n test_dict = dict(now=now, date=date, num=num)\n res.update(code=ResponseCode.Success, data=test_dict)\n # return res.data,200,{\"token\":\"111\"}\n return res.data", "def test_122001_get_recognize_record_correct(self):\n self.logger.info(\".... 
Start test_122001_get_recognize_record_correct ....\")\n try:\n with allure.step(\"teststep1: get parameters.\"):\n params = {\"member_id\": self.member_id, \"page_index\": 0, \"page_size\": 1, \"timestamp\": get_timestamp()}\n allure.attach(\"params value\", \"{0}\".format(params))\n self.logger.info(\"data: {0}\".format(params))\n\n with allure.step(\"teststep2: requests http get.\"):\n self.httpclient.update_header({\"authorization\": self.token})\n rsp = self.httpclient.get(self.URI, params=params)\n allure.attach(\"request.headers\", str(rsp.request.headers))\n allure.attach(\"request.url\", str(rsp.request.url))\n self.logger.info(\"request.headers: {}\".format(rsp.request.headers))\n self.logger.info(\"request.url: {}\".format(rsp.request.url))\n\n with allure.step(\"teststep4: assert the response code\"):\n allure.attach(\"Actual response code:\", str(rsp.status_code))\n self.logger.info(\"Actual response code:{0}\".format(rsp.status_code))\n assert rsp.status_code == 200\n rsp_content = rsp.json()\n\n with allure.step(\"teststep5: assert the response content\"):\n allure.attach(\"response content:\", str(rsp_content))\n self.logger.info(\"response content: {}\".format(rsp_content))\n assert rsp_content[\"code\"] == 1\n assert not rsp_content['message']\n assert len(rsp_content['result']['data']) == 1\n assert rsp_content['result']['page']['page_index'] == 0\n assert rsp_content['result']['page']['page_size'] == 1\n assert rsp_content['result']['page']['total_count'] == 2\n assert rsp_content['result']['page']['total_page'] == 2\n assert rsp_content['result']['page']['has_next_page']\n assert not rsp_content['result']['page']['has_previous_page']\n except Exception as e:\n allure.attach(\"Exception: \", \"{}\".format(e))\n self.logger.error(\"Error: exception occur: \")\n self.logger.error(e)\n assert False\n finally:\n self.logger.info(\".... 
End test_122001_get_recognize_record_correct ....\")\n self.logger.info(\"\")", "def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.", "def test_error_in_response(self):\n\n def async_http_client_fetch_patch(http_client, request, callback):\n \"\"\"This function is used to patch\n ```tornado.httpclient.AsyncHTTPClient.fetch``` so that when\n ```ks_util.AsyncAction.async_req_to_key_store``` calls\n ```tornado.httpclient.AsyncHTTPClient.fetch``` this test\n (or this function specifically) can get into the call stream.\"\"\"\n\n response = mock.Mock()\n response.code = httplib.INTERNAL_SERVER_ERROR\n response.error = str(uuid.uuid4()).replace(\"-\", \"\")\n response.body = None\n response.headers = tornado.httputil.HTTPHeaders()\n response.request_time = 24\n\n callback(response)\n\n def on_async_req_to_key_store_done(is_ok, http_status_code=None, body=None):\n \"\"\"Called when ```ks_util.AsyncAction.async_req_to_key_store```\n completes.\"\"\"\n self.assertFalse(is_ok)\n\n self.assertIsNone(http_status_code)\n self.assertIsNone(body)\n\n name_of_method_to_patch = \"tornado.httpclient.AsyncHTTPClient.fetch\"\n with mock.patch(name_of_method_to_patch, async_http_client_fetch_patch):\n aa = ks_util.AsyncAction(type(self)._key_store)\n aa.async_req_to_key_store(\n \"dave\",\n \"GET\",\n None,\n on_async_req_to_key_store_done)", "def test_httpresponse_pass_through(self):\n response = twilio_view(self.response_view)(self.request_post)\n self.assertTrue(isinstance(response, HttpResponse))", "def test_validate_response(self):\n\n\t\texpected_result = True # expected function result\n\n\t\tresponse_obj = requests.Response()\n\t\tresponse_obj.status_code = 200\n\n\t\tresponse = self.calc_obj.validate_response(response_obj)\n\n\t\ttry:\n\t\t\tself.assertEqual(response, expected_result)\n\n\t\tfinally:\n\t\t\ttab = [[response], [expected_result]]\n\t\t\tprint(\"\\n\")\n\t\t\tprint(inspect.currentframe().f_code.co_name)\n\t\t\tprint(tabulate(tab, headers='keys', tablefmt='rst'))\n\t\t\t\n\t\treturn", "def test_get_200(self):\n self.assertEqual(200, self.response.status_code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a generic printer that can handle either a list of text lines or a single string and print it to the screen
def multi_printer(text, player_name=None): if isinstance(text, list): for line in text: if line == ' ': print '' if player_name is not None: line = replace_player_name(line, player_name) lines = textwrap.wrap(line, CHARS_PER_LINE) for wrapped_line in lines: print wrapped_line elif isinstance(text, basestring): if player_name is not None: text = replace_player_name(text, player_name) lines = textwrap.fill(text, CHARS_PER_LINE) print lines else: print 'Error: did not receive list of strings or string'
[ "def ansiprint(self, *args: str, **kwargs):\n\n new_args = (str(i) if not isinstance(i, str) else i for i in args)\n parts = self.parse(*new_args, aslist=True)\n builtins.print(*parts, **kwargs)", "def print_text(txt):\n print(txt)", "def w(text=''):\n if printing:\n print(text)\n else:\n _handle.write(text + '\\n')", "def _print_plain(arg, p, cycle):\n if _can_print_latex(arg):\n p.text(stringify_func(arg))\n else:\n p.text(IPython.lib.pretty.pretty(arg))", "def print_text_section(my_list):\n def print_text(text):\n \"\"\"\n prints out text on panel \n \n Parameters\n ----------\n text : string\n string to be printed on screen.\n \n Returns\n -------\n None.\n \n \"\"\"\n temp_text = font2.render(text, True, WHITE)\n screen.blit(temp_text, [900, n])\n \n # Starting y coordinate for text\n n = 120\n \n # For loop that prints out text \n for i in my_list:\n print_text(i)\n n= n + 20", "def Print(self,*args,**kwargs):\n print(*args,**kwargs)", "def print_to_stdout(text, reps):\r\n\tfor i in range(reps): # use: range(size), range(begin, end) or range(begin, end, step_size)\r\n\t\tprint(text)", "def print(self, *args, **kwargs) -> str:\n self.console.print(*args, highlight=False, **kwargs)\n return self.console.export_text(clear=True, styles=False)", "def print(self, *args):\n print(*args, file=self.output_file)", "def prints(self, data, base=None):\r\n return self.write(self._process(data, base))", "def print_templates():\n\n print(\"\\n--> No subtitle:\\n\")\n print(DEFAULT_TEMPLATE)\n print(\"\\n--> With subtitle:\\n\")\n print(DEFAULT_TEMPLATE_SUBTITLE)\n print(\"\\n--> Supported placeholders:\")\n print(\" - banner: \" + PH_BANNER)\n print(\" - subtitle: \" + PH_SUBTITLE)\n print(\" - PS1: \" + PH_PS1)", "def print(self, *args):\n print(*args, file=self.dump_file)", "def print_text(widget, data):\n text_buffer = data.get_buffer() # data is a gtk.TextView widget\n text = text_buffer.get_text(text_buffer.get_start_iter(), text_buffer.get_end_iter(), True)\n print(text)", "def print_text(sobj):\n print_text(sobj.read())", "def print_items(items):\n\tfor item in items:\n\t\tprint(item)", "def double_text(text_to_print):\n return text_to_print + text_to_print", "def _print_mode(self, *obj, file=None, end=\"\\n\", sep=\" \", flush=False, mode=\"n\"):\n if file is None:\n file = self.file\n ostr = []\n for x in obj:\n if isinstance(x, str):\n ostr.append(x)\n else:\n ostr.append(repr(x))\n if mode == \"n\":\n print(*ostr, sep=sep, flush=flush, file=file, end=end)\n else:\n print(\n self.fmt(sep.join(ostr), mode), sep=sep, flush=flush, file=file, end=end\n )", "def print(self, *args, sep=' ', end='\\n', file=None): # known special case of print\n pass", "def formatPrintString(self, value):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
searches the string for the <playername> placeholder and inserts the player_name passed in; returns the string
def replace_player_name(text, player_name): sub_string = "<playername>" return string.replace(text, sub_string, player_name)
[ "def find_player(search_str, ap, pp):\n # clean periods, since they aren't consistent between sources\n search_str = search_str.replace(\".\", \"\")\n # check if any of the search words are in the full name\n # TODO: incorporate the close matches in here as well\n checkfunc = (\n lambda name: all(\n [\n sw in name.lower().replace(\".\", \"\")\n for sw in search_str.lower().split(\" \")\n ]\n )\n or SequenceMatcher(\n lambda c: c in \"._ -\", search_str.lower(), name.lower()\n ).ratio()\n > 0.6\n )\n picked_players = pp.index.get_level_values(\"player\")\n filt_mask = picked_players.map(checkfunc) if not pp.empty else None\n filtered_pp = pp[filt_mask] if not pp.empty else pp\n if not filtered_pp.empty:\n print(\"\\n Picked players:\")\n print(filtered_pp)\n\n available_players = ap.index.get_level_values(\"player\")\n checked_avail = available_players.map(checkfunc)\n\n filt_mask = checked_avail if not ap.empty else None\n filtered_ap = ap[filt_mask] if not ap.empty else ap\n if filtered_ap.empty:\n print(\"\\n Could not find any available players.\")\n else:\n print(\"\\n Available players:\")\n print(filtered_ap)", "def searchPlayer(wp_page='', player_name=''):\n\n\tif player_name:\n\t\tplayer_name = player_name.replace(' ', '+')\n\t\tsearchitemurl = 'https://int.soccerway.com/search/players/?q=%s' % (player_name)\n\t\traw = base.getURL(searchitemurl)\n\t\tplayers = re.findall(r'<td class=\"player\"><a href=\"[\\/\\-\\w]*\" class=\"[\\_\\s\\/\\-\\w]*\">.*</a></td>', raw, re.IGNORECASE)\n\t\tnames = re.findall(r'<td class=\"player\"><a href=\"[\\/\\-\\w]*\" class=\"[\\_\\s\\/\\-\\w]*\">(.*)</a></td>', raw, re.IGNORECASE)\n\n\t\tplayer_name = player_name.replace('+', ' ')\n\t\tmatches = list()\n\t\ti = 0\n\t\tfor name in names:\n\t\t\tflag = 'y'\n\t\t\tname = unidecode(name)\n\t\t\tname = re.split(r'\\s|\\-', name)\n\t\t\tname_parts = re.split(r'\\s|\\-', player_name)\n\n\t\t\tfor name_part in name_parts:\n\t\t\t\tname_part = unidecode(name_part)\n\t\t\t\tif name_part != 'career' and name_part != 'statistics' and '(' not in name_part and ')' not in name_part and not name_part.isnumeric():\n\t\t\t\t\tif name_part not in name:\n\t\t\t\t\t\tflag = 'n'\n\t\t\t\t\t\tbreak\n\n\t\t\tif flag == 'n':\n\t\t\t\ti += 1\n\t\t\t\tcontinue\n\n\t\t\tmatches.append(players[i])\n\n\t\tif len(matches) == 1:\n\t\t\treturn matches[0]\n\t\telif len(matches) > 1:\n\t\t\tfinal_list = list()\n\t\t\tfor text in matches:\n\t\t\t\tsoccerway_id = re.findall(r'<td class=\"player\"><a href=\"/players/([\\/\\-\\w]*)\" class=\"[\\_\\s\\/\\-\\w]*\">.*</a></td>', text, re.IGNORECASE)\n\t\t\t\t\n\t\t\t\tif soccerway_id:\n\t\t\t\t\tsearchitemurl = 'https://int.soccerway.com/players/%s' % (soccerway_id[0])\n\t\t\t\t\traw = base.getURL(searchitemurl)\n\t\t\t\t\t\n\t\t\t\t\tbday_site = re.findall(r'<dd data-date_of_birth=\"date_of_birth\">([\\w\\s]*)</dd>', raw, re.IGNORECASE)\n\t\t\t\t\tbday_site[0] = (bday_site[0].split())\n\t\t\t\t\tbday_site = search_patterns.val_parser(code=2, found_items=bday_site)\n\t\t\t\t\tbday_wp = search_patterns.date_val(page_text=wp_page.text, word='birth_date')\n\t\t\t\t\n\t\t\t\t\tif bday_site == bday_wp:\n\t\t\t\t\t\tfinal_list.append(text)\n\n\t\t\tif len(final_list) == 1:\n\t\t\t\treturn final_list[0]\n\t\t\telse:\n\t\t\t\treturn ''\n\n\treturn ''", "def format_player_name_for_sports_ref(player_name):\n formatted_name = player_name.replace(\" III\", \"\")\n formatted_name = formatted_name.replace(\".\", \"\")\n formatted_name = formatted_name.replace(\"'\", \"\")\n formatted_name = 
formatted_name.replace(\" \", \"-\")\n formatted_name = formatted_name.lower()\n name_split = formatted_name.split(\" \")\n\n return formatted_name", "def name(player):\n return player['name']", "def get_player_name():\n\n return player.get(\"player_name\")", "def _add_player(self):\n nba_stat_local = [\"FT\", \"MIN\", \"BL\", \"3P\", \"TOT\", \"FG\", \"3PA\",\n \"DR\", \"OR\", \"TO\", \"PF\", \"PTS\", \"FGA\", \"A\", \"ST\"]\n help_text = 'Enter player name and stats: %s' % self.stats\n player = req_input(help_text=help_text)\n new_string = []\n if not player:\n print \"No player name. Exit.\"\n else:\n new_player = player.split(\" \")\n if len(new_player) == 15: # Not enough arguments or Name is skipped\n print \"No player name or incorrect 15-th stats. Try again.\"\n elif len(new_player) > 15:\n nba_stat_local.insert(0, \"PLAYER FULL NAME\")\n player_name = \" \".join(map(str, new_player[:-15:]))\n player_stat = new_player[-15:]\n player_stat.insert(0, player_name)\n new_dict = dict(zip(nba_stat_local, player_stat))\n new_dict.update(CONSTDATA)\n fieldnames = get_csv_header(self.fpath)\n for header_items in fieldnames:\n one_header = new_dict.get(header_items)\n new_string.append(one_header)\n else:\n new_string = [\"\"] * 25\n new_string.insert(2, player)\n # Append a new player in csv file\n add_csv_string(fpath=self.fpath, val='a', new_string=new_string)\n print \"Player %s has been added.\" % player", "def store_name_match(self, match_id, name, account):\n self.c.execute('SELECT * FROM player WHERE (name = ?) AND (matchid = ?)', (name, match_id))\n results = self.c.fetchone()\n if results is None:\n self.c.execute('INSERT INTO player (name, matchid, account) VALUES (?,?,?)', (name, match_id, account))\n self.conn.commit()\n logging.log(logging.INFO, 'Sotring a name and macth id in player table: %s and %s', name, match_id)", "def get_player_abbr(player_name):\n flag = 0\n name_string = \"\"\n if player_name != \"\":\n name_string += player_name[0]\n while True:\n if player_name[1] == \".\":\n name_string += \".\"\n name_string += player_name[2]\n break\n else:\n break\n\n name_string += \".\"\n for i in player_name:\n if i == \" \":\n flag = 1\n if flag == 1:\n name_string += i\n name_string = name_string.strip()\n if \"Jr.\" not in name_string:\n name_string = name_string.strip(\".\")\n return name_string\n else:\n return \"\"", "def findPlayerName(dataPlayersLeague, playerId):\n for each in dataPlayersLeague:\n if each[\"personId\"] == playerId:\n return each[\"firstName\"] + \" \" + each[\"lastName\"]", "def set_player_name(name):\n\n player[\"player_name\"] = name", "def _format_name(player_name):\r\n player_name = _catch_name_exceptions(player_name)\r\n formatted_str = \"\"\r\n for i in range(len(player_name)):\r\n if player_name[i] == \" \":\r\n formatted_str += \"-\"\r\n elif player_name[i] == \".\":\r\n pass\r\n else:\r\n formatted_str += player_name[i]\r\n return formatted_str", "def find_pokemon_name(self, text):\n return self.__pkmMan.find_pokemon_name(text)", "def onChangePlayerName(self, inEvent: RealTimeEvent):\n\n player_id = inEvent.parameters['player_id']\n player_location = inEvent.parameters['player_location']\n name = inEvent.parameters['name']\n\n if player_id in self.server.PlayerInterface.onlinePlayerWithId:\n player = self.server.PlayerInterface.onlinePlayerWithId[player_id]\n if name not in player.getAliases():\n player.addAlias(name)\n self.realTimeRound.changePlayerName(player_id, player_location, name)", "def set_player_name(self, player):\r\n self.__name 
= player", "async def player_search(ctx, *player_name, silent=False):\n player_name = \" \".join(player_name)\n\n logger = get_logger_with_context(ctx)\n logger.info(\"Looking up stats for '%s' \", player_name)\n\n if not player_name:\n await ctx.send(\"Please specify an Epic username after the command, \"\n \"ex: `!hunted LigmaBalls12`\")\n return\n\n try:\n await fortnite_tracker.get_player_stats(ctx, player_name, silent)\n except Exception as e:\n logger.warning(e, exc_info=should_log_traceback(e))\n\n # Fortnite API stats are unnecessary in silent mode\n if silent:\n return\n\n logger.warning(f\"Falling back to Fortnite API for '{player_name}'..\")\n await fortnite_api.get_player_stats(ctx, player_name)", "def store_name(self, name):\n self.c.execute('INSERT INTO player (name) VALUES (?)', (name))\n self.conn.commit()\n logging.info(logging.INFO, 'inserting name: %s', name)", "def __fix_player_names(self,team_playersheet): \n \n # replace Jr for Jr.\n team_playersheet[\"Player\"] = team_playersheet[\"Player\"].str.replace(\"Jr$\",\"Jr.\")\n #TODO make the following more all encompassing\n team_playersheet[\"Player\"] = team_playersheet[\"Player\"].str.replace(\"Bruce Brown Jr.\",\"Bruce Brown\")\n team_playersheet[\"Player\"] = team_playersheet[\"Player\"].str.replace(\"Ellie\",\"Elie\")\n team_playersheet[\"Player\"] = team_playersheet[\"Player\"].str.replace(\"Reddick\",\"Redick\")\n\n return team_playersheet", "def get_player_name(self, player_number):\n p, q = self.players\n return p if self.__piece_type__(p) == player_number else q", "def parse_player(player):\n if player.lower() == \"x\":\n return \"X\"\n elif player.lower() == \"o\":\n return \"O\"\n else:\n print(\"Player must be X or O\")\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initializes the game windows as new curses windows and sets up some color pairs
def init_windows(self, stdscr): if USE_CURSES and self.terminal_size(): self.back_win = stdscr self.fill_back() self.main_win = curses.newwin(MAIN_WIN_ROWS, MAIN_WIN_COLS, 2, 2) self.input_win = curses.newwin(INPUT_WIN_ROWS, INPUT_WIN_COLS, 33, 2) self.stat_win = curses.newwin(STAT_WIN_ROWS, STAT_WIN_COLS, 14, 89) self.time_win = curses.newwin(TIME_WIN_ROWS, TIME_WIN_COLS, 2, 89) self.init_colors()
[ "def setup(self):\n self.nog = nog.NumberOfGames(self.window)\n self.sd = sd.SavedData()\n self.create()\n self.window.geometry('265x350')\n self.window.config(bg='White')\n self.window.title('Tic-Tac-Toe')\n self.window.mainloop()", "def init_colors(self):\n\t\tcurses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n\t\tcurses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)\n\t\tcurses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)\n\t\tcurses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\t\tcurses.init_pair(5, curses.COLOR_CYAN, curses.COLOR_BLACK)", "def create_windows(self):\n self.left = TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, 0, \"Backlog\")\n self.center = TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, self.MAX_WIN_WIDTH, \"In Progress\")\n self.right = TaskWindow(self.MAX_WIN_HEIGHT, self.MAX_WIN_WIDTH, 0, 2 * self.MAX_WIN_WIDTH, \"Done\")\n self.control = ControlWindow(self.control_lines, curses.COLS, self.MAX_WIN_HEIGHT, 0)\n self.scr.refresh()", "def _init_window(\n self,\n ):\n if not self.initialized_window:\n pygame.init()\n pygame.display.set_caption(\"Traders\")\n self.window = pygame.display.set_mode([self.window_size] * 2)\n self.screen = pygame.display.get_surface()\n self.font = pygame.font.SysFont(\"monospace\", 15)", "def __init__(self, window):\n self.window = window\n self.window.title(\"Sorting Algorithm Visualizer\")\n self.window.geometry(\"800x450\")\n self.window.minsize(800, 450)\n self.window.maxsize(800, 450)\n self.window.config(bg = \"#152e57\")", "def makeWindow(self, size, pos, color):\n window = pygame.Surface(size)\n window.fill(color)\n self.windows[window] = pos\n self.screen.blit(window, pos)", "def create_windows(self):\n\n # implemented in sub classes", "def _create_palette_win(self, col):\n palette_win = tk.Toplevel(self.root)\n palette_win.wm_title('Color Chooser #' + str(col))\n palette_win.resizable(False, False)\n palette_pick = tk.Canvas(palette_win, width=256, height=64, bg='#FFFFFF')\n palette_pick.grid(column=0, row=0, sticky=\"n\")\n palette_pick_action = lambda event : self._palette_click( event, col )\n palette_pick.bind(\"<Button-1>\", palette_pick_action)\n palette_close = ttk.Button(palette_win, text = 'Close', command = palette_win.destroy)\n palette_close.grid(column=0, row=1, sticky=\"s\")\n # Draws the colors blocks for selecting from the NES palette\n for i, color in enumerate(nes_palette):\n x = (i % PALETTE_SPAN) * PALETTE_BOXSIZE\n y = (i // PALETTE_SPAN) * PALETTE_BOXSIZE\n palette_pick.create_rectangle(x,y,\n x+PALETTE_BOXSIZE-1,y+PALETTE_BOXSIZE-1,\n fill=color, outline=color)", "def initializeGameRelated(self):\n\n self._btn_close_id=gui.addWidget(Button(self.index,150,35,(255,255,255),\n 50,360,'Close Window','images/button_background.png',14,\n button_close_parent))\n\n self._txt_id=gui.addWidget(TextWidget(self.index,140,250,(255,255,255),\n 55,100,'',13,'black',2))", "def initGame(self, L):\r\n self.configureZiseOfTable(L)\r\n self.configureBallsInInt()", "def display_setup(self, window):\n ## TO DO: GET ACTUAL PARAMETERS FOR THESE\n\n scnWidth = self.screen_width\n scnHeight = self.screen_height\n\n window.mouseVisible = False\n\n screen_share = EyeLinkCoreGraphicsPsychoPy(self.tracker, window)\n pylink.openGraphicsEx(screen_share)\n# pylink.setTargetSize(int(surf.get_rect().w/150), int(surf.get_rect().w/500)); \n return window", "def initialise_ui(self):\n \n #Add event boxes to codel color chooser\n self.codelColors = [gtk.EventBox() for color in 
piedit.colors.all_colors()]\n for (color,(x,y),i) in zip(piedit.colors.all_colors(),\n ((x,y) for x in xrange(7) for y in xrange(3)),\n xrange(len(self.codelColors))): \n event_box = self.codelColors[i]\n event_box.set_events(gtk.gdk.BUTTON_PRESS_MASK)\n event_box.visible = True\n self.gladeui.get_widget(\"codelColorsTable\").attach(\n event_box,\n x,\n x+1,\n y,\n y+1,\n xoptions=gtk.EXPAND|gtk.FILL, \n yoptions=gtk.EXPAND|gtk.FILL, \n xpadding=1, \n ypadding=1)\n event_box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))\n event_box.set_size_request(-1,30)\n event_box.default_color=color\n event_box.connect(\"button_press_event\", self.handlers.on_codelColorEventBox_clicked) \n event_box.show()\n \n #Initialise image \n program_table = self.gladeui.get_widget(\"programTable\")\n program_table.add_events(gtk.gdk.BUTTON_PRESS_MASK)\n program_table.connect(\"button_press_event\", self.handlers.on_programTable_button_press_event)\n self.clear_image(self.width,self.height)", "def new_win(color1,color2):\n my_font = pygame.font.SysFont(\"comicsansms\",90)\n text = my_font.render(\"GAME OVER\",True, (85,107,47))\n text_rect = text.get_rect()\n text_rect.center = (300,160)\n screen.blit(text, text_rect)\n dark_green = (85,107,47)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n button1(\"Try Again\",90,260,115,27,color2,color1,reset_game)\n button1(\"Quit\",435,260,65,27,color2,color1,quitgame)\n game_wall()\n pygame.display.update()", "def new_game():\n global buttons\n del buttons[:]\n screen.clear()\n screen.bgcolor(\"#4A4A4A\")\n\n # reset the game state and draw it out\n reset_game_state()\n draw_board()\n penaltyCount()\n\n # bind the event handler\n screen.onclick(onclick_board_handler)\n screen.onkeyrelease(save_state, \"s\")\n screen.onkeyrelease(load_state, \"l\")\n screen.listen()", "def setup_window():\n scr = turtle.Screen()\n t = turtle.Turtle()\n t.speed(0)\n t.fillcolor(\"chocolate\")\n return t, scr", "def begin_new_game(self):\n if self.DELAY:\n self.message.config(text=\"Creating new game...\", fg=\"purple\")\n\n # variable clearing\n self.c.itemconfig(\"squares\", width=1, outline=\"black\")\n self.quux = None # temporary storage\n self.pieces = {\"black\": [], \"red\": []} # first list is black's pieces, then red's pieces.\n self.piece = None\n self.piece_square = None\n self.square = ()\n self.count = -1\n self.oldmessage_info = [\"\", \"\"]\n self.c.delete(\"pieces\")\n self.jumps = [[], []]\n self.jump_made = None\n self.c.delete(\"win_text\")\n self.history = []\n\n # flag setting\n self.got_move = 0\n self.got_piece = 0\n self.end_now = 0\n self.add_mode = 0\n self.remove_mode = 0\n\n self.make_pieces(\"black\", self.DELAY)\n self.make_pieces(\"red\", self.DELAY)\n\n self.moving = \"black\" # reversed since setup_move will switch it.\n\n if self.DEBUG_BIG_THINGS:\n print \"self.pieces: \", self.pieces\n\n self.MoveLoop()", "def init_window(self):\n\n self.setWindowTitle(self.video_name)\n self.setWindowIcon(QIcon(\"../resources/diamond_twist.png\"))\n self.setStyleSheet(wndw_style)", "def CreateExteriorWindowComponents(self):\r\n self.CreateMenu()\r\n self.CreateStatusBar()", "def initializeGameRelated(self):\n\n \"\"\"Initialize subwidgets\"\"\"\n self._btn_newgame_id=gui.addWidget(Button(self.index,150,35,\n (255,255,255),50,35,'New Game','images/button_background.png',14,\n button_close_parent))\n\n self._btn_save_id=gui.addWidget(Button(self.index,150,35,(255,255,255),\n 50,100,'Save 
Game','images/button_background.png',14,\n button_close_parent))\n\n self._btn_load_id=gui.addWidget(Button(self.index,150,35,(255,255,255),\n 50,165,'Load Game','images/button_background.png',14,\n button_close_parent))\n\n self._btn_fullscr_id=gui.addWidget(Button(self.index,150,35,\n (255,255,255),50,230,'Toggle on/off fullscreen',\n 'images/button_background.png',14,pygame.display.toggle_fullscreen))\n \n self._btn_exit_id=gui.addWidget(Button(self.index,150,35,(255,255,255),\n 50,295,'Exit Game','images/button_background.png',14,sys.exit))\n \n self._btn_close_id=gui.addWidget(Button(self.index,150,35,(255,255,255),\n 50,360,'Close Window','images/button_background.png',14,\n button_close_parent))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
erases the main game window, then writes either the list or the string to the main window, wrapping the text to fit; the last row written to is stored in self
def write_main(self, text, player_name=None, row=1, col=1): self.main_win.erase() if isinstance(text, list): for line in text: if line == " ": row += 1 if player_name is not None: line = replace_player_name(line, player_name) self.main_win.addstr(row, col, line, curses.A_BOLD) row +=1 if row >= MAIN_WIN_ROWS: break elif isinstance(text, basestring): if player_name is not None: text = replace_player_name(text, player_name) lines = textwrap.wrap(text, CHARS_PER_LINE) for line in lines: self.main_win.addstr(row, col, line, curses.A_BOLD) row += 1 if row >= MAIN_WIN_ROWS: break else: self.main_win.addstr('Error: did not receive list of strings or string') self.main_row = row
[ "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()", "def update_window(self):\n if self.window is not None:\n # First, clean-up\n actr.clear_exp_window()\n\n # Then, add new elements\n if self.phase == \"fixation\":\n item = actr.add_text_to_exp_window(self.window, \"+\",\n x = 400, y = 300,\n color = \"black\")\n \n elif self.phase == \"stimulus\":\n color = self.current_trial.color\n word = self.current_trial.word\n item = actr.add_text_to_exp_window(self.window, word,\n x=395, y= 300,\n color = color)\n\n for i, col in enumerate(COLOR_MAPPINGS):\n item = actr.add_text_to_exp_window(self.window,\n COLOR_MAPPINGS[col],\n x = 600 + i * 50,\n y = 500,\n color = col)\n print(type(COLOR_MAPPINGS))\n\n elif self.phase == \"done\":\n color = self.current_trial.color\n word = self.current_trial.word\n item = actr.add_text_to_exp_window(self.window, \"done\",\n x=395, y= 300,\n color = \"black\")", "def write_main_bottom(self, text):\n\t\tif len(text) > MAIN_WIN_COLS-2: text = text[:MAIN_WIN_COLS-2]\n\t\tblank_line = ' '*40\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, blank_line)\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, text, curses.color_pair(4))\n\t\tself.main_win.refresh()", "def update_text(self):\n\t\t#Get the desired min & max row indices\n\t\ttop = self.view.y\n\t\tbottom = self.view.y + TERMINAL_ROWS\n\t\t#Get the desired min & max column indices\n\t\tleft = self.view.x\n\t\tright = self.view.x + TERMINAL_COLS\n\t\t#Load the map background into the text buffer\n\t\tfor y, row in enumerate(self.map.text[top:bottom]):\n\t\t\t#self.view.text[y] = self.map.text[y][left:right]\n\t\t\tself.view.text[y] = row[left:right]\n\n\t\t#Load the player avatar into the text buffer\n\t\t#line_list = list(self.view.text[self.player.y])\n\t\t#line_list[self.player.x] = self.player.avatar\n\t\t#self.view.text[self.player.y] = \"\".join(line_list)", "def write_main_mid(self, text):\n\t\trow = self.main_row + 1\n\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\tfor line in lines:\n\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\trow += 1\n\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\tif row < MAIN_WIN_ROWS:\n\t\t\tblank_line = \" \"*int(MAIN_WIN_COLS-2)\n\t\t\tfor _ in range(row, MAIN_WIN_ROWS-1):\n\t\t\t\tself.main_win.addstr(row, ui.COL,blank_line)\n\t\tself.main_row = row\n\t\tself.main_win.refresh()", "def write_main_artifact(self, text):\n\t\trow = self.main_row + 1\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break", "def draw_end_screen(self):\n\n pyxel.cls(col=COL_FINISH)\n\n display_text = TEXT_FINISH[:]\n\n if self.l_score >= WIN_CONDITION:\n winner = \"The LEFT player!\"\n else:\n winner = \"The RIGHT player!\"\n display_text.insert(1, winner)\n for i, text in enumerate(display_text):\n y_offset = (FONT_HEIGHT + 2) * i\n text_x = self.center_text(text, WIDTH)\n pyxel.text(text_x, HEIGHT_FINISH + y_offset, text, COL_FINISH_TEXT)", "def urwid_main(game_state, text_lines, screen_refresh_speed=0.05):\n\n # uc_u = '\\u25B2'\n \"\"\"\n uc_u = '\\u2191'\n uc_d = '\\u2193'\n uc_l = '\\u2190'\n uc_r = '\\u2192'\n\n uc_ul = '\\u2196'\n 
uc_ur = '\\u2197'\n uc_dr = '\\u2198'\n uc_dl = '\\u2199'\n \"\"\"\n\n color_palette = [\n (\"banner\", \"\", \"\", \"\", \"#fff\", \"g35\"),\n (\"statusbar\", \"white\", \"black\"),\n (\"highlight\", \"white\", \"\", \"\", \"g0\", \"g35\"),\n (\"white\", \"white\", \"\", \"\", \"g0\", \"g35\"),\n (\"inside\", \"\", \"\", \"\", \"g0\", \"g35\"),\n (\"outside\", \"\", \"\", \"\", \"g0\", \"g35\"),\n (\"bg\", \"\", \"\", \"\", \"g35\", \"#fff\"),\n ]\n\n # note that these are ordered in Python 3.6+, this assumes you are running 3.6+ !!!\n arrows = {}\n arrows[\"n\"] = \"n\"\n arrows[\"e\"] = \"e\"\n arrows[\"s\"] = \"s\"\n arrows[\"w\"] = \"w\"\n arrows[\"nw\"] = \"nw\"\n arrows[\"ne\"] = \"ne\"\n arrows[\"sw\"] = \"sw\"\n arrows[\"se\"] = \"se\"\n\n exit_string = \" \"\n for k, v in arrows.items():\n if game_state.exits.get(k):\n exit_string += v\n else:\n exit_string += \" \" * len(v) # preserve spacing from glyph\n exit_string += \" \" # separator whitespace\n\n # imagine a function that adds a space or the arrow depending on\n # whether the compass arrow last received game state\n # currently just used to display them all as a placeholder\n\n fixed_size_for_now = 1000\n main_window_buffer_size = 40\n main_window_stack = StackedWidget()\n\n # must be initalized with an empty string\n # these should probably go in a map instead of hardcoded...\n # probably want to map N xml-defined tags to M message deques\n story_window = ScrollBar(Scrollable(urwid.Text(\"\")))\n tcp_window = ScrollBar(Scrollable(urwid.Text(\"\")))\n chat_window = ScrollBar(Scrollable(urwid.Text(\"\")))\n\n main_window_stack.push_widget(story_window)\n main_window_stack.push_widget(tcp_window)\n main_window_stack.push_widget(chat_window)\n\n input_box = urwid_readline.ReadlineEdit(\n \"> \", \"\"\n ) # pretty sure urwid_readline package needs Python3\n\n status_line = urwid.Text(game_state.status_line_string)\n\n mainframe = urwid.Pile(\n [\n (\n \"weight\",\n fixed_size_for_now,\n urwid.Filler(\n main_window_stack, height=main_window_buffer_size, valign=\"bottom\"\n ),\n ),\n (\"fixed\", 1, urwid.Filler(status_line, \"bottom\")),\n (\"fixed\", 1, urwid.Filler(input_box, \"bottom\")),\n ],\n focus_item=2,\n )\n\n # these were for the terminal\n def set_title(widget, title):\n mainframe.set_title(title)\n\n def quit(*args, **kwargs):\n pass # this method is never called\n\n def unhandled_input(txt, key):\n \"\"\"\n much of this input should be handled in the pile or widgets inside the pile\n q: why is this called unhandled input if it is the input handler??\n a: ... 
urwid thing, this can probably be changed to whatever is appropriate, just use care\n \"\"\"\n if key in (\"`\"):\n if main_window_stack.current + 1 >= main_window_stack.widget_count:\n main_window_stack.current = 0\n else:\n # don't use the fake setter, it's doing some weird modulo stuff\n # maybe after reviewing the module code more...\n main_window_stack.current += 1\n\n if key in (\"tab\"):\n # rudimentary focus bouncer for now\n # ideally focus bounce will toggle buffers in the future\n if mainframe.focus_position == 2:\n mainframe.focus_position = 0\n else:\n mainframe.focus_position = 2\n return\n\n if key in (\"enter\"):\n\n game_state.history_scroll_mode = False # toggle history scroll mode off\n\n if len(txt.edit_text) == 0:\n \"\"\" ignore an empty command\n \"\"\"\n return\n\n submitted_command = txt.edit_text\n\n # used to have a command splitter here, decided not to use it\n game_state.input_history.append(submitted_command)\n game_state.command_queue.put(submitted_command.encode(\"utf-8\"))\n\n txt.set_edit_text(\"\")\n txt.set_edit_pos(0)\n\n return\n\n if key in (\"up\", \"down\"):\n\n # deal with the 0 history case here\n if len(game_state.input_history) == 0:\n return\n\n # enter history scroll mode until the user presses enter\n if game_state.history_scroll_mode == False:\n game_state.history_scroll_mode = True\n game_state.input_history_counter = len(game_state.input_history) - 1\n\n # don't do this if you just set it to true! (elif)\n elif game_state.history_scroll_mode == True:\n\n if key in (\"up\"):\n if game_state.input_history_counter > 0:\n game_state.input_history_counter -= 1\n\n if key in (\"down\"):\n if (\n game_state.input_history_counter\n < len(game_state.input_history) - 1\n ):\n game_state.input_history_counter += 1\n\n input_box.set_edit_text(\n game_state.input_history[game_state.input_history_counter]\n )\n input_box.set_edit_pos(len(txt.edit_text))\n return\n\n if key in (\"left\"):\n input_box.set_edit_text(\"\")\n input_box.set_edit_pos(len(txt.edit_text))\n return\n\n if key in (\"right\"):\n \"\"\"\n interestingly, because of urwid-readline, i can use right and left arrows\n but only when there is already text on the line, and not on the far edges\n so on the far left, a left key will trigger this\n on the far right, a right key will trigger unknown key: right\n \"\"\"\n # need the mutex because this uses a function of the underlying deque\n # see: https://stackoverflow.com/a/6518011\n with game_state.rt_command_queue.mutex:\n game_state.rt_command_queue.queue.clear()\n return\n\n # not working\n if key in (\"ctrl q\", \"ctrl Q\"):\n # raise urwid.ExitMainLoop()\n # quit()\n pass\n\n # input_box.set_edit_text(\"unknown key: \" + repr(key))\n # input_box.set_edit_pos(len(txt.edit_text))\n return\n\n \"\"\"\n # supposed to fix focus loss, i don't have that issue yet\n # and it may be solved where i set handle_mouse=False in MainLoop\n def mouse_event(self, size, event, button, col, row, focus):\n pass\n \"\"\"\n\n # urwid.connect_signal(term, 'title', set_title)\n # urwid.connect_signal(term, 'closed', quit)\n\n # reference: http://urwid.org/reference/main_loop.html\n loop = urwid.MainLoop(\n mainframe,\n color_palette,\n handle_mouse=False,\n unhandled_input=lambda key: unhandled_input(input_box, key),\n )\n\n def refresh_screen(game_state, loop):\n # view_lines_buffer = list() # a buffer of lines sent to the terminal\n while True:\n # ideally we could just check if loop is running\n # is there a data flag on loop we can pause until is True 
(loop.run() started)\n\n # do this first so that the urwid MainLoop 'loop' exists! otherwise too fast\n # it would be better to kick this off inside loop.run I think\n time.sleep(screen_refresh_speed)\n\n # lets test this somewhere else...\n if game_state.quit_event.is_set():\n # from: https://stackoverflow.com/a/7099229/1693693\n os.kill(os.getpid(), signal.SIGINT) # give SIGINT to main for cleanup\n # TODO: raise doesn't interrupt main, not working, explore later\n # raise urwid.ExitMainLoop()\n\n # set character name\n game_state.status_line_contents[\"character_firstname\"] = game_state.character_firstname\n\n # calculate remaining roundtime\n current_roundtime = int(game_state.roundtime - game_state.time)\n if current_roundtime < 0:\n current_roundtime = 0\n if current_roundtime < 10:\n # pad < 10\n game_state.status_line_contents[\"roundtime\"] = \" \" + str(current_roundtime)\n else:\n # don't pad > 10, note, for roundtimes 100+ there will be a shift in the UI. #wontfix\n game_state.status_line_contents[\"roundtime\"] = \"\" + str(current_roundtime)\n\n exit_string = \"\"\n for k, v in arrows.items():\n if game_state.exits.get(k):\n exit_string += v\n else:\n exit_string += \" \" * len(v) # preserve spacing from glyph\n exit_string += \" \" # separator whitespace\n\n game_state.status_line_contents[\"exit_string\"] = exit_string\n\n # show the roundtime stable indicator if both time and roundtime are reported\n # this will be false only when the displayed roundtime is based on projected time\n # (game_state.time is projected time)\n if game_state.reported_time >= game_state.roundtime:\n game_state.status_line_contents[\"roundtime_stable\"] = \".\"\n else:\n game_state.status_line_contents[\"roundtime_stable\"] = \" \"\n\n # format the status line with the current content values\n status_line_output = game_state.status_line_string.format(**game_state.status_line_contents)[:80]\n # set the status line\n mainframe.contents[1][0].original_widget.set_text(\n (\"statusbar\", status_line_output)\n )\n\n # fill up the urwid main view text\n if not text_lines.empty():\n extend_view_buffer(game_state, text_lines)\n\n # this target is one below main_window so lets try that instead\n # mainframe is the pile, contents[0] is the first item\n # scrollable_textbox = mainframe.contents[0][0].original_widget.current_widget._original_widget\n # this one is dynamic based on active stacked window\n current_main_window = mainframe.contents[0][\n 0\n ].original_widget.current_widget._original_widget\n # scrollable_textbox = story_window._original_widget\n\n # we can use python names instead of drilling down...\n # - this is critical to future urwid organization\n # the contents object is a list of (widget, option) tuples\n # http://urwid.org/reference/widget.html#urwid.Pile\n # apparently it will not take a deque, so coerce to a list\n story_window._original_widget._original_widget.set_text(\n list(game_state.urwid_views[\"urwid_main_view\"])\n )\n tcp_window._original_widget._original_widget.set_text(\n list(game_state.urwid_views[\"urwid_tcp_view\"])\n )\n chat_window._original_widget._original_widget.set_text(\n list(game_state.urwid_views[\"urwid_chat_view\"])\n )\n\n # MUST - scroll the active window\n # scroll unless item 0 is in focus - is item 0 the filler?\n if mainframe.focus_position != 0:\n # set and record the most recent position\n current_main_window._original_widget._invalidate # invalidate the visible text widget cache\n current_main_window.set_scrollpos(-1)\n 
game_state.urwid_scrollbar_last = current_main_window.get_scrollpos()\n\n loop.draw_screen()\n\n # refresh the screen in its own thread.\n # this camn probably get moved to main() in pylanthia.py\n refresh = threading.Thread(target=refresh_screen, args=(game_state, loop))\n refresh.daemon = True # kill the thread if the process dies\n refresh.start()\n\n loop.run()", "def create_text_window(self):\n win = self.tkinter.Toplevel()\n if ON_WINDOWS:\n win.attributes(\"-toolwindow\", 1)\n win.title('Details')\n win.protocol('WM_DELETE_WINDOW', self.when_message_window_x)\n self.message_window = win\n self.text = self.tkinter.Text(win)\n self.message_window.withdraw()", "def draw(self, list, selected=-1, attr=curses.A_NORMAL):\n\n # draw generics\n super().draw()\n\n # get the window measurements\n win_height, win_width = self.window.getmaxyx()\n\n # if the list is longer than the maximum height, truncate it TODO: make something smarter here (scrolling?)\n if len(list) > win_height:\n list = list[:win_height-1]\n\n # iterate through all ToDos within the list\n for i, item in enumerate(list):\n # This one defines the layout\n desc = f\"{item['description']} [{item['project']}]\"\n # Truncate the description if too long\n if len(desc) > win_width - self.border_cells*2:\n # maximum length: window - border - length of project title - (space and square bracket chars ( = 3)) - (three dots)\n max_desc_length = win_width - self.border_cells*2 - len(item['project']) - 3 - 3\n desc = f\"{item['description'][:max_desc_length]}... [{item['project']}]\"\n # If not long enough, pad with spaces in order to paint a whole line\n else:\n desc = \"{:<{}}\".format(desc, win_width-2)\n \n if selected == i:\n highlight = curses.A_REVERSE\n else:\n highlight = curses.A_NORMAL\n\n # newlines are not supposed to be drawn\n desc = desc.replace('\\n', ' ')\n\n # Write description to the window\n self.window.addstr(i+3, 2,f\"{desc}\", self.colorize[i%2] | attr | highlight)\n\n self.refresh()", "def show2(self,win):\n\n # display invader\n # -------------\n win.addstr(self.yPos, self.xPos,\"-0-\")\n\n win.refresh()", "def show1(self,win):\n\n # display invader\n # -------------\n win.addstr(self.yPos, self.xPos,\"-o-\")\n\n win.refresh()", "def lidar_single_row():\r\n newwin = Toplevel(window)\r\n newwin.geometry(\"500x600\")\r\n\r\n global t\r\n t = Text(newwin)\r\n t.place(x=75, y=250, height=300, width=200)\r\n\r\n row_num = IntVar(newwin)\r\n row_choice = ['Choose Row']\r\n for i in range(0, data.shape[0]):\r\n row_choice.append(i)\r\n\r\n row_num.set('Choose Row')\r\n popupMenu = OptionMenu(newwin, row_num, *row_choice).place(x=0, y=0, width=150)\r\n\r\n azimuth_block_num = IntVar(newwin)\r\n azimuth_choices = ['Choose Azimuth Block', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\n azimuth_block_num.set('Choose Azimuth Block')\r\n popupMenu = OptionMenu(newwin, azimuth_block_num, *azimuth_choices).place(x=175, y=0, width=200)\r\n\r\n Label(newwin, text=\"Datablock parameter:\").place(x=175, y=75)\r\n\r\n timestamp = Button(newwin, text='Get Timestamp', fg=\"red\", command=lambda: print_list(get_timestamp(data, row_num.get(), single_row=True))).place(x=0, y=100, width=150)\r\n frame_id = Button(newwin, text='Get Frame Id', fg=\"red\", command=lambda: print_list(get_frame_id(data, row_num.get(), single_row=True))).place(x=0, y=125, width=150)\r\n measurement_id = Button(newwin, text='Get Measurement Id', fg=\"red\", command=lambda: print_list(get_measurement_id(data, row_num.get(), single_row=True))).place(x=0, 
y=150, width=150)\r\n encoder_count = Button(newwin, text='Get Encoder Count', fg=\"red\", command=lambda: print_list(get_encoder_count(data, row_num.get(), single_row=True))).place(x=0, y=175, width=150)\r\n signal_photon = Button(newwin, text='Get Signal Photons', fg=\"red\", command=lambda: print_list(get_signal_photons(data, row_num.get(), single_row=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=100, width=150)\r\n range_mm = Button(newwin, text='Get Range', fg=\"red\", command=lambda: print_list(get_range(data, row_num.get(), single_row=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=125, width=150)\r\n reflectivity = Button(newwin, text='Get Reflectivity', fg=\"red\", command=lambda: print_list(get_reflectivity(data, row_num.get(), single_row=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=150, width=150)\r\n noise_photon = Button(newwin, text='Get Noise Photons', fg=\"red\", command=lambda: print_list(get_noise_photons(data, row_num.get(), single_row=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=175, width=150)", "def listLoop(self):\n while self.state == \"LIST\":\n self.listBG = pygame.transform.smoothscale(pygame.image.load('assets/QuotesBlank.png').convert_alpha(), (self.width,self.height))\n self.screen.blit(self.listBG, (0, 0))\n #BUTTONS\n #pygame.draw.rect(self.screen, (255, 0, 0), pygame.Rect(450, 500, 100, 100), 3)\n #SCREEN WORDS\n myfont = pygame.font.Font('assets/BRLNSDB.TTF', 40)\n #text = myfont.render((self.AllQuotes.quoteDict[0][0]), True, (0,0,0))\n #print(self.AllQuotes.pagelist[0][0][0])\n\n ptext.draw(self.AllQuotes.pagelist[0][0][0],(140,23),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][0][1],(140,115),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][0][2],(440,115),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][1][0],(140,228),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][1][1],(140,320),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][1][2],(440,320),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][2][0],(140,433),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][2][1],(140,525),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][2][2],(440,525),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][3][0],(561,23),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][3][1],(561,115),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][3][2],(861,115),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][4][0],(561,228),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n 
ptext.draw(self.AllQuotes.pagelist[0][4][1],(561,320),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][4][2],(861,320),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][5][0],(561,433),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][5][1],(561,525),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][5][2],(861,525),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n #MOUSE\n pygame.mouse.set_visible(True)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n #BUTTON REPLACEMENT (WITH COORDINATES)\n if event.type == pygame.MOUSEBUTTONDOWN:\n temp=pygame.mouse.get_pos()\n print(temp)\n if((temp[0]>=892 and temp[0]<=951) and (temp[1]>=620 and temp[1]<=675)):\n self.state = \"LIST2\"\n self.mainLoop()\n if((temp[0]>=147 and temp[0]<=217) and (temp[1]>=646 and temp[1]<=675)):\n self.state = \"MENU\"\n self.mainLoop()\n pygame.display.flip()", "def write_stat(self, text):\n\t\tself.stat_win.erase()\n\t\trow = 1\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tline = line[:STAT_WIN_COLS-1]\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(2))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()\n\t\tself.stat_row = row", "def imu_row_section():\r\n newwin = Toplevel(window)\r\n newwin.geometry(\"500x600\")\r\n\r\n global t\r\n t = Text(newwin)\r\n t.place(x=75, y=250, height=300, width=200)\r\n\r\n Label(newwin, text=\"Enter Rows\").place(x=0, y=0)\r\n entry1 = Entry(newwin)\r\n entry1.place(x=0, y=25, width=50)\r\n\r\n imu_time = Button(newwin, text=\"Get IMU Timestamp\", fg=\"red\", command=lambda: print_list(get_IMU_time(data, command(entry1), row_section=True))).place(x=0, y=50, width=150)\r\n accel_time = Button(newwin, text=\"Get Accelerometer Timestamp\", fg=\"red\", command=lambda: print_list(get_accel_time(data, command(entry1), row_section=True))).place(x=0, y=75, width=150)\r\n gyro_time = Button(newwin, text=\"Get Gyroscope Timestamp\", fg=\"red\", command=lambda: print_list(get_gyro_time(data, command(entry1), row_section=True))).place(x=0, y=100, width=150)\r\n x_accel = Button(newwin, text=\"Get x acceleration\", fg=\"red\", command=lambda: print_list(get_x_accel(data, command(entry1), row_section=True))).place(x=0, y=125, width=150)\r\n y_accel = Button(newwin, text=\"Get y acceleration\", fg=\"red\", command=lambda: print_list(get_y_accel(data, command(entry1), row_section=True))).place(x=175, y=50, width=150)\r\n z_accel = Button(newwin, text=\"Get z acceleration\", fg=\"red\", command=lambda: print_list(get_z_accel(data, command(entry1), row_section=True))).place(x=175, y=75, width=150)\r\n x_ang_vel = Button(newwin, text=\"Get x angular velocity\", fg=\"red\", command=lambda: print_list(get_x_ang_vel(data, command(entry1), row_section=True))).place(x=175, y=100, width=150)\r\n y_ang_vel = Button(newwin, text=\"Get y angular velocity\", fg=\"red\", command=lambda: print_list(get_y_ang_vel(data, command(entry1), row_section=True))).place(x=175, y=125, width=150)\r\n z_ang_vel = Button(newwin, text=\"Get z angular velocity\", fg=\"red\", command=lambda: print_list(get_z_ang_vel(data, 
command(entry1), row_section=True))).place(x=175, y=150, width=150)", "def lidar_row_section():\r\n newwin = Toplevel(window)\r\n newwin.geometry(\"500x600\")\r\n\r\n global t\r\n t = Text(newwin)\r\n t.place(x=75, y=250, height=300, width=200)\r\n\r\n Label(newwin, text=\"Enter rows\").place(x=0, y=0)\r\n entry1 = Entry(newwin)\r\n entry1.place(x=0, y=25, width=50)\r\n\r\n Label(newwin, text=\"Azimuth Block\").place(x=100, y=0)\r\n azimuth_block_num = IntVar(newwin)\r\n azimuth_choices = ['Choose Azimuth Block', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\n azimuth_block_num.set(0)\r\n popupMenu = OptionMenu(newwin, azimuth_block_num, *azimuth_choices).place(x=100, y=25, width=50)\r\n\r\n Label(newwin, text=\"Datablock parameter:\").place(x=175, y=75)\r\n\r\n timestamp = Button(newwin, text='Get Timestamp', fg=\"red\", command=lambda: print_list(get_timestamp(data, command(entry1), row_section=True))).place(x=0, y=100, width=150)\r\n frame_id = Button(newwin, text='Get Frame Id', fg=\"red\", command=lambda: print_list(get_frame_id(data, command(entry1), row_section=True))).place(x=0, y=125, width=150)\r\n measurement_id = Button(newwin, text='Get Measurement Id', fg=\"red\", command=lambda: print_list(get_measurement_id(data, command(entry1), row_section=True))).place(x=0, y=150, width=150)\r\n encoder_count = Button(newwin, text='Get Encoder Count', fg=\"red\", command=lambda: print_list(get_encoder_count(data, command(entry1), row_section=True))).place(x=0, y=175, width=150)\r\n signal_photon = Button(newwin, text='Get Signal Photons', fg=\"red\", command=lambda: print_list(get_signal_photons(data, command(entry1), row_section=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=100, width=150)\r\n range_mm = Button(newwin, text='Get Range', fg=\"red\", command=lambda: print_list(get_range(data, command(entry1), row_section=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=125, width=150)\r\n reflectivity = Button(newwin, text='Get Reflectivity', fg=\"red\", command=lambda: print_list(get_signal_photons(data, command(entry1), row_section=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=150, width=150)\r\n noise_photon = Button(newwin, text='Get Noise Photons', fg=\"red\", command=lambda: print_list(get_signal_photons(data, command(entry1), row_section=True, azimuth_block=azimuth_block_num.get()))).place(x=175, y=175, width=150)", "def imu_single_row():\r\n newwin = Toplevel(window)\r\n newwin.geometry(\"500x600\")\r\n\r\n global t\r\n t = Text(newwin)\r\n t.place(x=75, y=250, height=300, width=200)\r\n\r\n row_num = IntVar(newwin)\r\n row_choice = ['Choose Row']\r\n for i in range(0, data.shape[0]):\r\n row_choice.append(i)\r\n\r\n row_num.set('Choose Row')\r\n popupMenu = OptionMenu(newwin, row_num, *row_choice).place(x=0, y=0, width=150)\r\n\r\n imu_time = Button(newwin, text=\"Get IMU Timestamp\", fg=\"red\", command=lambda: print_list(get_IMU_time(data, row_num.get(), single_row=True))).place(x=0, y=50, width=150)\r\n accel_time = Button(newwin, text=\"Get Accelerometer Timestamp\", fg=\"red\", command=lambda: print_list(get_accel_time(data, row_num.get(), single_row=True))).place(x=0, y=75, width=150)\r\n gyro_time = Button(newwin, text=\"Get Gyroscope Timestamp\", fg=\"red\", command=lambda: print_list(get_gyro_time(data, row_num.get(), single_row=True))).place(x=0, y=100, width=150)\r\n x_accel = Button(newwin, text=\"Get x acceleration\", fg=\"red\", command=lambda: print_list(get_x_accel(data, row_num.get(), 
single_row=True))).place(x=0, y=125, width=150)\r\n y_accel = Button(newwin, text=\"Get y acceleration\", fg=\"red\", command=lambda: print_list(get_y_accel(data, row_num.get(), single_row=True))).place(x=175, y=50, width=150)\r\n z_accel = Button(newwin, text=\"Get z acceleration\", fg=\"red\", command=lambda: print_list(get_z_accel(data, row_num.get(), single_row=True))).place(x=175, y=75, width=150)\r\n x_ang_vel = Button(newwin, text=\"Get x angular velocity\", fg=\"red\", command=lambda: print_list(get_x_ang_vel(data, row_num.get(), single_row=True))).place(x=175, y=100, width=150)\r\n y_ang_vel = Button(newwin, text=\"Get y angular velocity\", fg=\"red\", command=lambda: print_list(get_y_ang_vel(data, row_num.get(), single_row=True))).place(x=175, y=125, width=150)\r\n z_ang_vel = Button(newwin, text=\"Get z angular velocity\", fg=\"red\", command=lambda: print_list(get_z_ang_vel(data, row_num.get(), single_row=True))).place(x=175, y=150, width=150)", "def add_to_events_to_draw(vDict, textEvent):\n\n windowDict = vDict['windowDict']\n\n textWindow = windowDict['textWindow']\n\n blankEventText = vDict['blankEventText']\n\n windowDict = vDict['windowDict']\n\n textWindow = windowDict['textWindow']\n\n lastEvent = vDict['eventsToPrint'][-1]\n\n match = textEvent == lastEvent.rstrip(' ')\n\n if match:\n # if textEvent equals the last line in the eventsToPrint list\n vDict['eventsToPrint'][-1] = '{} <x{}>'.format(textEvent, '2')\n elif vDict['eventsToPrint'][-1].startswith(textEvent):\n # elif the last line in the eventsToPrint starts with textEvent\n st = vDict['eventsToPrint'][-1].split(' <x')\n try:\n st1, st2 = st[0], int(st[1].strip('>'))\n vDict['eventsToPrint'][-1] = '{} <x{}>'.format(textEvent, st2 + 1)\n except IndexError:\n print('Index error')\n print(st)\n else:\n vDict['eventsToPrint'].popleft()\n vDict['eventsToPrint'].append(blankEventText.format(textEvent))\n #l = vDict['eventsToPrint'].pop(0)\n #vDict['eventsToPrint'].append(blankEventText.format(textEvent))\n\n for t in range(vDict['INFO_WINDOW_HEIGHT'] - 2):\n try:\n textWindow.draw_str(1, 1 + t, blankEventText)\n events_print = vDict['eventsToPrint'][t]\n textWindow.draw_str(1, 1 + t, events_print)\n except tdl.TDLError:\n pass\n\n # tdl.flush()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes out a list of strings to the main window, one row below the most recent row written to
def write_main_artifact(self, text): row = self.main_row + 1 if isinstance(text, list): for line in text: if line == " ": row += 1 self.main_win.addstr(row, ui.COL, line, curses.A_BOLD) row +=1 if row >= MAIN_WIN_ROWS: break
[ "def write_main(self, text, player_name=None, row=1, col=1):\n\t\tself.main_win.erase()\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tif player_name is not None: line = replace_player_name(line, player_name)\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telif isinstance(text, basestring):\n\t\t\tif player_name is not None: text = replace_player_name(text, player_name)\n\t\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\t\tfor line in lines:\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow += 1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telse:\n\t\t\tself.main_win.addstr('Error: did not receive list of strings or string')\n\t\tself.main_row = row", "def write_main_bottom(self, text):\n\t\tif len(text) > MAIN_WIN_COLS-2: text = text[:MAIN_WIN_COLS-2]\n\t\tblank_line = ' '*40\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, blank_line)\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, text, curses.color_pair(4))\n\t\tself.main_win.refresh()", "def write_stat_append(self, text):\n\t\trow = self.stat_row\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(3))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()", "def write_main_mid(self, text):\n\t\trow = self.main_row + 1\n\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\tfor line in lines:\n\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\trow += 1\n\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\tif row < MAIN_WIN_ROWS:\n\t\t\tblank_line = \" \"*int(MAIN_WIN_COLS-2)\n\t\t\tfor _ in range(row, MAIN_WIN_ROWS-1):\n\t\t\t\tself.main_win.addstr(row, ui.COL,blank_line)\n\t\tself.main_row = row\n\t\tself.main_win.refresh()", "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()", "def write_stat(self, text):\n\t\tself.stat_win.erase()\n\t\trow = 1\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tline = line[:STAT_WIN_COLS-1]\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(2))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()\n\t\tself.stat_row = row", "def show_output(self, message):\n if self.main_thread != get_ident():\n self.message_queue += [message]\n return\n\n for line in str(message).split('\\n'):\n self.output_text.insert(END, \"{}\\n\".format(line))\n\n self.output_text.see(\"end\") # scroll to end\n self.after(self.output_clear_time, self.clear_output)", "def showOutputLines(**kwargs):\n\n tableView = kwargs.pop(_ShowKey.tableView, None)\n proxyModel = kwargs.pop(_ShowKey.proxyModel, None)\n output = kwargs.pop(_ShowKey.output, None)\n outputType = kwargs.pop(_ShowKey.outputType, None)\n\n indexes = tableView.selectionModel().selectedRows()\n\n if len(indexes) == 1:\n output.clearSignal.emit()\n\n jobsDB = SqlJobsTable(config.get(ConfigKey.SystemDB))\n\n index = proxyModel.mapToSource(indexes[0])\n model = proxyModel.sourceModel()\n\n row = index.row()\n # column = index.column()\n job = model.dataset.data[row][\n 
JobHistoryKey.Status\n ].obj # TODO: change to status\n rowid = model.dataset.data[row][JobHistoryKey.ID].obj\n if job is None:\n # print(\"Fetching Job\")\n records = jobsDB.fetchJob({\"rowid\": rowid}, JobsTableKey.job)\n if records:\n record = records.fetchone()\n job = pickle.loads(zlib.decompress(record[1]))\n model.dataset.data[row][JobHistoryKey.Status].obj = copy.deepcopy(job)\n else:\n msg = \"Information cannot be read.\"\n output.insertTextSignal.emit(msg, {\"log\": False})\n return\n\n if outputType == _ShowKey.output:\n\n regPercentEx = re.compile(r\":\\W*(\\d+)%$\")\n # The file 'file name' has been opened for writing.\n # TODO: how to do it without locale dependency\n regOutputFileEx = re.compile(r\"file (.*?) has\")\n indexes = tableView.selectedIndexes()\n\n processedFiles = 0\n for line, arguments in job.output:\n if m := regPercentEx.search(line):\n n = int(m.group(1))\n if n < 100:\n continue\n if f := regOutputFileEx.search(line): # pylint: disable=unused-variable\n processedFiles += 1\n arguments[\"log\"] = False\n output.insertTextSignal.emit(line, arguments)\n # The signals are generated to fast and the History window\n # seems unresponsive\n sleep(0.000001)\n\n for line in job.oCommand.strCommands:\n output.insertTextSignal.emit(line, {\"log\": False})\n # The signals are generated to fast and the History window\n # seems unresponsive\n sleep(0.000001)\n\n msg = stats(job)\n\n output.insertTextSignal.emit(msg, {\"log\": False})\n\n elif outputType == _ShowKey.errors:\n\n for analysis in job.errors:\n if isinstance(analysis[1], dict):\n output.insertTextSignal.emit(analysis[0], analysis[1])\n sleep(0.000001)\n else:\n for i, m in enumerate(analysis):\n if i == 0:\n lines = m.split(\"\\n\")\n findSource = True\n for index, line in enumerate(lines):\n color = SvgColor.orange\n if findSource and (\n (searchIndex := line.find(\"File Name\")) >= 0\n ):\n if searchIndex >= 0:\n color = SvgColor.tomato\n findSource = False\n output.insertTextSignal.emit(\n line + \"\\n\", {\"color\": color, \"log\": False}\n )\n sleep(0.000001)\n else:\n output.insertTextSignal.emit(\n m, {\"color\": SvgColor.red, \"log\": False}\n )\n sleep(0.000001)\n jobsDB.close()", "def update(self, q):\n for line in iter_except(q.get_nowait, Empty): # display all content\n if line is None:\n self.tk_frame.after(500, self.update, q) # schedule next update\n return\n else:\n #self.tk_txt_out['text'] = line # update GUI\n # self.tk_txt_out.insert(END,line)\n self.insert_line_to_output(line,18)\n self.show_filename_in_textbox(self.tk_txt_out,self.output)\n break # display no more than one line per 40 milliseconds\n\n self.tk_frame.after(1, self.update, q) # schedule next update", "def endRow(self):\n\t\tnumWidgets = len(self.widgets)\n\t\t# Find number of GUI objects since last divie call\n\t\tsize = numWidgets - self.divieSize\n\t\tself.divies.append(JPLDivies('endRow',size))\n\t\tself.divieSize = numWidgets", "def endCol(self):\n\t\tnumWidgets = len(self.widgets)\n\t\t# Find number of GUI objects since last divie call\n\t\tsize = numWidgets - self.divieSize\n\t\tself.divies.append(JPLDivies('endCol',size))\n\t\tself.divieSize = numWidgets", "def imu_multiple_row():\r\n newwin = Toplevel(window)\r\n newwin.geometry(\"500x600\")\r\n\r\n global t\r\n t = Text(newwin)\r\n t.place(x=75, y=250, height=300, width=200)\r\n\r\n Label(newwin, text=\"Enter Rows\").place(x=0, y=0)\r\n entry1 = Entry(newwin)\r\n entry1.place(x=0, y=25, width=50)\r\n\r\n imu_time = Button(newwin, text=\"Get IMU Timestamp\", 
fg=\"red\", command=lambda: print_list(get_IMU_time(data, command(entry1), multiple_row=True))).place(x=0, y=50, width=150)\r\n accel_time = Button(newwin, text=\"Get Accelerometer Timestamp\", fg=\"red\", command=lambda: print_list(get_accel_time(data, command(entry1), multiple_row=True))).place(x=0, y=75, width=150)\r\n gyro_time = Button(newwin, text=\"Get Gyroscope Timestamp\", fg=\"red\", command=lambda: print_list(get_gyro_time(data, command(entry1), multiple_row=True))).place(x=0, y=100, width=150)\r\n x_accel = Button(newwin, text=\"Get x acceleration\", fg=\"red\", command=lambda: print_list(get_x_accel(data, command(entry1), multiple_row=True))).place(x=0, y=125, width=150)\r\n y_accel = Button(newwin, text=\"Get y acceleration\", fg=\"red\", command=lambda: print_list(get_y_accel(data, command(entry1), multiple_row=True))).place(x=175, y=50, width=150)\r\n z_accel = Button(newwin, text=\"Get z acceleration\", fg=\"red\", command=lambda: print_list(get_z_accel(data, command(entry1), multiple_row=True))).place(x=175, y=75, width=150)\r\n x_ang_vel = Button(newwin, text=\"Get x angular velocity\", fg=\"red\", command=lambda: print_list(get_x_ang_vel(data, command(entry1), multiple_row=True))).place(x=175, y=100, width=150)\r\n y_ang_vel = Button(newwin, text=\"Get y angular velocity\", fg=\"red\", command=lambda: print_list(get_y_ang_vel(data, command(entry1), multiple_row=True))).place(x=175, y=125, width=150)\r\n z_ang_vel = Button(newwin, text=\"Get z angular velocity\", fg=\"red\", command=lambda: print_list(get_z_ang_vel(data, command(entry1), multiple_row=True))).place(x=175, y=150, width=150)", "def write_time(self, text):\n\t\tself.time_win.erase()\n\t\trow = 1\n\t\tfor line in text:\n\t\t\tself.time_win.addstr(row, ui.COL, line, curses.color_pair(4))\n\t\t\trow += 1\n\t\t\tif row >= TIME_WIN_ROWS:\n\t\t\t\tbreak", "def shell(lista):", "def output(self):\n self.numList.reverse()\n def lengthFinder(columnNumber):\n currentLength=0\n longestLength=0\n for i in range(columnNumber, len(self.numList),5):\n currentLength=len(self.numList[i])\n if currentLength>longestLength:\n longestLength=currentLength\n return longestLength+1\n columnWidth=[]\n for i in range(5):\n columnWidth.append(lengthFinder(i))\n for i in range(len(self.numList)):\n print('{0:>{width}}'.format(self.numList[i], width=columnWidth[i%5]), end=' ')\n if i%5==4:\n print()\n print()", "def display_entry(row):\n print(\"\\n\" + blue_row(\"Task name: \" + row['name']))\n print(blue_row(\"Task date: \" + row['date'][:-9]))\n print(blue_row(\"Task minutes: \" + row['time']))\n print(blue_row(\"Task notes: \" + row['note']) + \"\\n\")", "def list_entries(entries):\r\n print('---------------')\r\n print(' ENTRIES')\r\n print('---------------')\r\n entrylist = entries.copy()\r\n while entrylist:\r\n print('* {} '.format(entrylist.pop(0)))\r\n print(' written {} '.format(entrylist.pop(0)))\r\n print(' @ {}'.format(entrylist.pop(0)))", "def add_log(self, text):\n if type(text) is list:\n for each in text:\n print(f'LOG: {each}')\n self.info_win.insert(tk.END, f'$ {each}\\n')\n else:\n print(f'LOG: {text}')\n self.info_win.insert(tk.END, f'$ {text}\\n')", "def show_lines(vfd, lines, delay=DEFAULT_DELAY):\n display = [\"\"]*LINES\n while True:\n line = lines.readline()\n if line == \"\":\n break\n display.append(line.rstrip())\n display.pop(0)\n for i, d in enumerate(display):\n vfd.write_line(i, d.ljust(NCHARS))\n time.sleep(delay)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
handles writing a string to the middle of the main window, starting 1 row below the main body of text
def write_main_mid(self, text): row = self.main_row + 1 lines = textwrap.wrap(text, CHARS_PER_LINE) for line in lines: self.main_win.addstr(row, ui.COL, line, curses.A_BOLD) row += 1 if row >= MAIN_WIN_ROWS: break if row < MAIN_WIN_ROWS: blank_line = " "*int(MAIN_WIN_COLS-2) for _ in range(row, MAIN_WIN_ROWS-1): self.main_win.addstr(row, ui.COL,blank_line) self.main_row = row self.main_win.refresh()
[ "def write_main_bottom(self, text):\n\t\tif len(text) > MAIN_WIN_COLS-2: text = text[:MAIN_WIN_COLS-2]\n\t\tblank_line = ' '*40\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, blank_line)\n\t\tself.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, text, curses.color_pair(4))\n\t\tself.main_win.refresh()", "def write_main(self, text, player_name=None, row=1, col=1):\n\t\tself.main_win.erase()\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tif player_name is not None: line = replace_player_name(line, player_name)\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telif isinstance(text, basestring):\n\t\t\tif player_name is not None: text = replace_player_name(text, player_name)\n\t\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\t\tfor line in lines:\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow += 1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telse:\n\t\t\tself.main_win.addstr('Error: did not receive list of strings or string')\n\t\tself.main_row = row", "def write_main_artifact(self, text):\n\t\trow = self.main_row + 1\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break", "def Write(msg):\n # Counts the number of lines in the text box by finding the index of the last line and returns it as an integer\n numlines = int(msgbox.index('end - 1 line').split('.')[0])\n # Deletes the first line of text in the text box if there are more than 5 lines in the box\n if numlines > 5:\n msgbox.delete(1.0, 2.0)\n #insert message and newline in box\n msgbox.insert('end', msg)\n msgbox.insert('end', '\\n')", "def update_text(self):\n\t\t#Get the desired min & max row indices\n\t\ttop = self.view.y\n\t\tbottom = self.view.y + TERMINAL_ROWS\n\t\t#Get the desired min & max column indices\n\t\tleft = self.view.x\n\t\tright = self.view.x + TERMINAL_COLS\n\t\t#Load the map background into the text buffer\n\t\tfor y, row in enumerate(self.map.text[top:bottom]):\n\t\t\t#self.view.text[y] = self.map.text[y][left:right]\n\t\t\tself.view.text[y] = row[left:right]\n\n\t\t#Load the player avatar into the text buffer\n\t\t#line_list = list(self.view.text[self.player.y])\n\t\t#line_list[self.player.x] = self.player.avatar\n\t\t#self.view.text[self.player.y] = \"\".join(line_list)", "def _write(self, text):\n self.appendPlainText(text)\n self.repaint()", "def textArea(self, text, position = [0, 0], align = \"left\", percent_style = False):\n\t\tself.readyButtons('BOTH')\n\t\tprinted_text = text\n\t\tper_page = 0\n\t\ttext_array = []\n\t\tc = 0\n\t\twhile 1:\n\t\t\tself.resetBuffer()\n\t\t\tlines, printed_lines, printed = self.text(printed_text, [0, 0], 'center')\n\n\t\t\tif text_array == []:\n\t\t\t\ttext_array = printed\n\t\t\t\tper_page = printed_lines\n\n\t\t\t#print line\n\t\t\tself.rectangle([[0,59],[192,63]], False)\n\t\t\t#filling\n\t\t\tif percent_style:\n\t\t\t\tself.rectangle([[-1,60], [int((192/len(text_array))*(c + printed_lines)), 62]])\n\t\t\telse:\n\t\t\t\tself.rectangle([[int((192/len(text_array))*c), 60], [int((192/len(text_array))*(c + printed_lines)), 62]])\n\t\t\tself.rewrite()\n\n\t\t\t#parse new text \n\t\t\twhile 1:\n\t\t\t\tjoy = self.joystick()\n\t\t\t\t#can down\n\t\t\t\tif self.buttons() == 'BOTH':\n\t\t\t\t\treturn self.readyButtons('BOTH', False)\n\t\t\t\tif (c + printed_lines) < 
len(text_array):\n\t\t\t\t\tif joy == 'down' or joy == \"right\":\n\t\t\t\t\t\tc += printed_lines\n\t\t\t\t\t\tbreak\n\t\t\t\t#can up\n\t\t\t\tif c > 0:\n\t\t\t\t\tif joy == 'up' or joy == \"left\":\n\t\t\t\t\t\tc -= printed_lines\n\t\t\t\t\t\tif c < 0:\n\t\t\t\t\t\t\tc = 0\n\t\t\t\t\t\tbreak\n\t\t\t\ttime.sleep(0.1)\n\t\t\tprinted_text = \" \".join(text_array[c: c + per_page])", "def write(self, text):\n self.get_widget().configure(state='normal')\n self.get_widget().insert(tk.END, text)\n self.get_widget().see('end')\n self.get_widget().configure(state='disabled')\n self.get_widget().update()", "def writeText(stdscr, string):\n clearText(stdscr)\n row = TEXT_START_ROW\n words = string.split(\" \")\n\n currentLine = words[0]\n\n for word in words[1:]:\n if len(word) + len(currentLine) + 1 > MAX_STR_LEN:\n stdscr.addstr(row, TEXT_START_POS, currentLine)\n row += 1\n currentLine = word\n else:\n currentLine += \" \" + word\n\n # Empty the buffer\n stdscr.addstr(row, TEXT_START_POS, currentLine)", "def write_stat(self, text):\n\t\tself.stat_win.erase()\n\t\trow = 1\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tline = line[:STAT_WIN_COLS-1]\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(2))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()\n\t\tself.stat_row = row", "def _addstr(self, y, x, prefix, string, postfix):\n str_len = len(string)\n\n # Ensure that we have not written up to the last character in the window. This is\n # a quirk with curses that needs to be replicated\n if (\n # Ensure that y is not greater than the window size\n y > self.window_size[0]\n # Ensure that y is not negative\n or y < 0\n # Ensure that x plus whatever we are writing to the screen is less than or equal to\n # the width of the window\n or x + str_len > self.window_size[1]\n # Ensure that x is not negative\n or x < 0\n ):\n raise curses.error(\n \"A Out of Bounds: (\"\n + str(y)\n + \",\"\n + str(x)\n + \"->\"\n + str(str_len)\n + \") \"\n + str(self.window_size)\n )\n\n # Make sure that the last character in the window will not be written to\n if y >= self.window_size[0] and x + str_len >= self.window_size[1]:\n raise curses.error(\"Can't write to the last cell in a window\")\n\n # Write the string to the screen\n self._write_pos(y, x, prefix, string, postfix)", "def output_add(self, txt):\r\n self.output.configure(state='normal')\r\n self.output.insert('end', '{}\\n\\n'.format(txt))\r\n self.output.configure(state='disabled')\r\n self.output.see(tk.END)", "def printText(self, text_string, start_xy):\n word_list = textwrap.wrap(text=text_string, width=UIC.Wrap_Width)\n start_xy_wfeed = start_xy # 'wfeed' -> \"with line feed\"\n for element in word_list:\n text = UIC.Big_Text.render(element, True, UIC.Blue, UIC.White)\n text_rect = text.get_rect()\n text_rect.topleft = start_xy_wfeed\n self.display.blit(text, text_rect)\n start_xy_wfeed = (start_xy_wfeed[0], start_xy_wfeed[1] + UIC.Text_Feed)", "def _insert_to_text_area(st_widget, content):\n st_widget.delete('1.0', 'end')\n st_widget.insert('end', content)\n # st_widget.see(tk.END)\n st_widget.update_idletasks()", "def write_stat_append(self, text):\n\t\trow = self.stat_row\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(3))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()", "def write(string, 
pos=Point(0,0)):\n for i, line in enumerate(string.splitlines()):\n stdout.write(f\"\\u001b[{pos.row + i};{pos.col}H{line}\")\n stdout.flush()", "def create_text_window(self):\n win = self.tkinter.Toplevel()\n if ON_WINDOWS:\n win.attributes(\"-toolwindow\", 1)\n win.title('Details')\n win.protocol('WM_DELETE_WINDOW', self.when_message_window_x)\n self.message_window = win\n self.text = self.tkinter.Text(win)\n self.message_window.withdraw()", "def print_centered(msg, fill=' ', window_size=80):\n half = int((window_size - len(msg)) / 2)\n print fill * half + msg + fill * (window_size - half)", "def show_output(self, message):\n if self.main_thread != get_ident():\n self.message_queue += [message]\n return\n\n for line in str(message).split('\\n'):\n self.output_text.insert(END, \"{}\\n\".format(line))\n\n self.output_text.see(\"end\") # scroll to end\n self.after(self.output_clear_time, self.clear_output)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes a single line of text, less than the length of the main window, to the last row of the main window
def write_main_bottom(self, text): if len(text) > MAIN_WIN_COLS-2: text = text[:MAIN_WIN_COLS-2] blank_line = ' '*40 self.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, blank_line) self.main_win.addstr(MAIN_WIN_ROWS-1, ui.COL, text, curses.color_pair(4)) self.main_win.refresh()
[ "def write_main_mid(self, text):\n\t\trow = self.main_row + 1\n\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\tfor line in lines:\n\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\trow += 1\n\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\tif row < MAIN_WIN_ROWS:\n\t\t\tblank_line = \" \"*int(MAIN_WIN_COLS-2)\n\t\t\tfor _ in range(row, MAIN_WIN_ROWS-1):\n\t\t\t\tself.main_win.addstr(row, ui.COL,blank_line)\n\t\tself.main_row = row\n\t\tself.main_win.refresh()", "def write_stat_append(self, text):\n\t\trow = self.stat_row\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(3))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()", "def write_main(self, text, player_name=None, row=1, col=1):\n\t\tself.main_win.erase()\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tif player_name is not None: line = replace_player_name(line, player_name)\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telif isinstance(text, basestring):\n\t\t\tif player_name is not None: text = replace_player_name(text, player_name)\n\t\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\t\tfor line in lines:\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow += 1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telse:\n\t\t\tself.main_win.addstr('Error: did not receive list of strings or string')\n\t\tself.main_row = row", "def write_main_artifact(self, text):\n\t\trow = self.main_row + 1\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tself.main_win.addstr(row, ui.COL, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break", "def finish_line(self, color: Optional[Tuple] = None) -> None:\n (y, x) = self._win.getyx()\n size = self.width - x\n if color:\n self.addnstr(' ' * size, size, to_curses_attr(color))\n else:\n self.addnstr(' ' * size, size)", "def write_stat(self, text):\n\t\tself.stat_win.erase()\n\t\trow = 1\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tline = line[:STAT_WIN_COLS-1]\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(2))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()\n\t\tself.stat_row = row", "def output_add(self, txt):\r\n self.output.configure(state='normal')\r\n self.output.insert('end', '{}\\n\\n'.format(txt))\r\n self.output.configure(state='disabled')\r\n self.output.see(tk.END)", "def append_text(self, text: str, color: QColor=QColor(0, 0, 0)) -> None:\n\n self.gui.textWindow.setTextColor(color)\n self.gui.textWindow.append(text)\n maxVal = self.gui.textWindow.verticalScrollBar().maximum()\n self.gui.textWindow.verticalScrollBar().setValue(maxVal)", "def running_line(text, window_size, tick):\n return ''", "def write_time(self, text):\n\t\tself.time_win.erase()\n\t\trow = 1\n\t\tfor line in text:\n\t\t\tself.time_win.addstr(row, ui.COL, line, curses.color_pair(4))\n\t\t\trow += 1\n\t\t\tif row >= TIME_WIN_ROWS:\n\t\t\t\tbreak", "def update_text(self):\n\t\t#Get the desired min & max row indices\n\t\ttop = self.view.y\n\t\tbottom = self.view.y + TERMINAL_ROWS\n\t\t#Get the desired min & max column indices\n\t\tleft = self.view.x\n\t\tright = self.view.x + TERMINAL_COLS\n\t\t#Load the map background 
into the text buffer\n\t\tfor y, row in enumerate(self.map.text[top:bottom]):\n\t\t\t#self.view.text[y] = self.map.text[y][left:right]\n\t\t\tself.view.text[y] = row[left:right]\n\n\t\t#Load the player avatar into the text buffer\n\t\t#line_list = list(self.view.text[self.player.y])\n\t\t#line_list[self.player.x] = self.player.avatar\n\t\t#self.view.text[self.player.y] = \"\".join(line_list)", "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()", "def _draw_status_line(self, left, top, width):\n # TODO: can't write to bottom right cell\n mode = '{} {} {}'.format(self._filename, self._mode.upper(),\n self._message).ljust(width - 1)\n self._stdscr.addstr(top, left, mode, curses.A_REVERSE)\n position = 'LN {}:{} '.format(self._row + 1, self._col + 1)\n self._stdscr.addstr(top, left + width - 1 - len(position), position,\n curses.A_REVERSE)", "def Write(msg):\n # Counts the number of lines in the text box by finding the index of the last line and returns it as an integer\n numlines = int(msgbox.index('end - 1 line').split('.')[0])\n # Deletes the first line of text in the text box if there are more than 5 lines in the box\n if numlines > 5:\n msgbox.delete(1.0, 2.0)\n #insert message and newline in box\n msgbox.insert('end', msg)\n msgbox.insert('end', '\\n')", "def write(self, text):\n self.get_widget().configure(state='normal')\n self.get_widget().insert(tk.END, text)\n self.get_widget().see('end')\n self.get_widget().configure(state='disabled')\n self.get_widget().update()", "def endRow(self):\n\t\tnumWidgets = len(self.widgets)\n\t\t# Find number of GUI objects since last divie call\n\t\tsize = numWidgets - self.divieSize\n\t\tself.divies.append(JPLDivies('endRow',size))\n\t\tself.divieSize = numWidgets", "def lineAfterFont(self):\n if True:\n return\n if self.mtd[self.ptr] == 1:\n self.ptr += 1\n self.endct += 1\n print(\"LINE after font\")\n if self.mtd[self.ptr] == 0:\n # ptr++;\n # endct--;\n print(\"END after font LINE\")", "def draw_end_screen(self):\n\n pyxel.cls(col=COL_FINISH)\n\n display_text = TEXT_FINISH[:]\n\n if self.l_score >= WIN_CONDITION:\n winner = \"The LEFT player!\"\n else:\n winner = \"The RIGHT player!\"\n display_text.insert(1, winner)\n for i, text in enumerate(display_text):\n y_offset = (FONT_HEIGHT + 2) * i\n text_x = self.center_text(text, WIDTH)\n pyxel.text(text_x, HEIGHT_FINISH + y_offset, text, COL_FINISH_TEXT)", "def create_text_window(self):\n win = self.tkinter.Toplevel()\n if ON_WINDOWS:\n win.attributes(\"-toolwindow\", 1)\n win.title('Details')\n win.protocol('WM_DELETE_WINDOW', self.when_message_window_x)\n self.message_window = win\n self.text = self.tkinter.Text(win)\n self.message_window.withdraw()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes to the stat window, which typically contains the character's illness, hunger and cold. Inventory also gets written to this window. Stores the last row written to in this window. Stops if we get to the last row
def write_stat(self, text): self.stat_win.erase() row = 1 lines = textwrap.wrap(text, 26) for line in lines: line = line[:STAT_WIN_COLS-1] self.stat_win.addstr(row, ui.COL, line, curses.color_pair(2)) row += 1 if row >= STAT_WIN_ROWS: self.stat_win.refresh() break self.stat_win.refresh() self.stat_row = row
[ "def write_stat_append(self, text):\n\t\trow = self.stat_row\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(3))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()", "def display_inventory(self, lvl, window):\n if \"syringe\" in self.inventory:\n x = sprite_size * (16 + 1.5)\n y = sprite_size * (12 + 1)\n window.blit(lvl.style[\"syringe\"], (x, y))\n else:\n i = 0\n for line in [12]:\n for col in [16, 17, 18]:\n x_slot = sprite_size * (col + 1.5)\n y_slot = sprite_size * (line + 1)\n window.blit(lvl.style[self.inventory[i]], (x_slot, y_slot))\n i += 1\n if \"ether\" in self.inventory and \\\n \"needle\" in self.inventory and \\\n \"tube\" in self.inventory:\n self.inventory = [\"syringe\"]\n pygame.display.flip()", "def _draw_inventory(vDict):\n\n windowDict = vDict['windowDict']\n invenWindow = windowDict['invenWindow']\n\n blankInvenText = vDict['blankInvenText']\n\n player = vDict['gameLevel'].player\n\n inven = player.inventory\n\n invenText = [('Inventory:', WHITE)]\n invenText += list((i.fullName, i.checkEquipColor(player)) for i in inven if i is not None and i.isValidItem)\n\n y = 1\n\n for i in invenText:\n invenWindow.draw_str(1, y, blankInvenText.format(i[0]), fg=tuple(i[1]))\n y += 1\n\n tdl.flush()", "def redraw_status_window(vDict):\n player = vDict['gameLevel'].player\n\n windowDict = vDict['windowDict']\n statusWindow = windowDict['statusWindow']\n\n blankInvenText = vDict['blankInvenText']\n\n halfBlankInvenText = vDict['halfBlankInvenText']\n\n y = 1\n\n for i in ('Species: {0.species.name}', 'Health: {0.health.amount}/{0.maxHealth}',\n 'Stamna: {0.stamna.amount}/{0.maxStamna}', '{0.magic.amount}/{0.maxMagic}',\n '{0.describeHunger}'):\n statusWindow.draw_str(\n 1, y, halfBlankInvenText.format(\n i.format(player)\n ))\n\n y += 1\n\n y = 1\n\n halfWidth = statusWindow.width // 2\n\n for i, s in zip(('STR', 'END', 'AGI', 'DEX', 'MIN', 'WIL', 'PER', 'MAG'), ALL_STATS):\n statusWindow.draw_str(halfWidth, y, halfBlankInvenText.format('{}: {}'.format(i, player.getTotalStat(s))))\n\n y += 1\n\n tdl.flush()", "def draw_inventory(self, inventory):\r\n self.screen.blit(self.inventory, (480, 0))\r\n for i in range(len(inventory.get_items())):\r\n display = self.small_font.render(inventory.get_items()[i], True, WHITE, BLACK)\r\n self.screen.blit(display, (480, TILE_SIZE//2*(i+1)))", "def write_file(self):\n\n # Opens profile text file\n wfile = open('item_data.txt','w+')\n # Rewrites text file with the current item object information\n wfile.write(\"Item Code,Item,Qualifier,Price ($),Item(s) in Stock\\n\")\n for ilist in self.cate_list:\n for product in ilist:\n # Converts object information to formatted string\n rewrite = \"{0},{1},{2},{3},{4}\\n\".format(product.itemCode,product.item,product.qualifier,product.price,product.itemsLeft)\n wfile.write(rewrite)\n wfile.close()\n\n # Updates inventory lists to current information\n self.load()", "def print_inventory(self):\n\t\tprint(\"You have beaten {} tiles!\".format(World.how_many_tile()))\n\t\tprint(\"You have {} guesses remaining\".format(self.guesses_remaining))\n\n\t\tprint(\"And you are located at {}, {}\".format(self.location_x, self.location_y))\n\t\tfor item in self.inventory:\n\t\t\tprint(item, '\\n')", "def open_stat(self):\r\n hp = f\"HP: {self.player.hp}/{self.player.max_hp}\".ljust(10)\r\n lv = f\"LV: {self.player.lv}\".ljust(10)\r\n name = self.player.name\r\n layout = 
[\r\n [sg.Text(f'\"{name}\"')],\r\n [sg.Text(lv)],\r\n [sg.Text(hp)],\r\n [sg.Button(\"Exit\", size=(10, 1), button_color=(\"#edf2ce\", \"#444444\"))]\r\n ]\r\n window = sg.Window(\"Stats\", layout, size=(250, 500), element_justification='c')\r\n window.read()\r\n window.close()", "def blitHUD(self):\n #blit self.daytime cover\n self.game.screen.blit(self.screen_cover, [0, 0])\n\n #hp bar creation\n blit_surface = pygame.Surface((abs(392 * (float(self.game.Player.stats['hp']) / self.game.Player.stats['maxhp'])), 24), pygame.SRCALPHA)\n blit_surface.fill((234, 0, 0, 213))\n self.game.screen.blit(blit_surface, (254, 589))\n\n for monster in self.game.EntityHandler.monsters:\n if monster.NPC == True and monster.interacting == True:\n self.game.screen.blit(self.text_box, [30, 350])\n self.game.screen.blit(self.thumb,[40, 360])\n\n #xp bar(s)\n blit_surface = pygame.Surface((497 * (float(self.game.Player.stats['mxp']) / self.game.Player.stats['maxmxp']), 12), pygame.SRCALPHA)\n blit_surface.fill((72, 196, 19, 221))\n self.game.screen.blit(blit_surface, (201, 636))\n blit_surface = pygame.Surface((497 * (float(self.game.Player.stats['pxp']) / self.game.Player.stats['maxpxp']), 12), pygame.SRCALPHA)\n blit_surface.fill((229, 102, 18, 221))\n self.game.screen.blit(blit_surface, (201, 622))\n\n self.game.screen.blit(self.xpbar, [200, 636])\n self.game.screen.blit(self.xpbar, [200, 622])\n self.game.screen.blit(self.hpbar, [250, 585])\n\n if self.chat_active:\n #blit the chat bar\n self.game.screen.blit(self.chat_bar, (25, 625))\n self.game.screen.blit(self.game.default_font.render(self.chat_message, True, (255, 255, 255)), [30, 628])\n\n if self.text_active or self.body_text[0]:\n self.showPrompt()", "def statusWrite(self):\n # this function is executed automatically (initialized at the bottom of this file), simply records current pxp status in a file\n # the text status is saved simply as 'status' and the numeric status code is saved as 'statuscode'\n # replace() is to make sure that if status has \\ or ', it won't break the command and give invalid status\n # os.system(\"echo '\"+json.dumps({\"status\":self.status.replace(\"\\\"\",\"\\\\\\\"\"), \"code\":self.code})+\"' > \"+c.encStatFile)\n if(self.lastWritten==self.code):\n return #this status was already written - do nothing\n try:\n with open(c.encStatFile,\"wb\") as f:\n f.write(json.dumps({\"status\":self.status.replace(\"\\\"\",\"\\\\\\\"\"), \"code\":self.code}))\n self.lastWritten = self.code\n except:\n pass", "def draw_player_stats(self, surface, player_sprite, index):\n items = prepare.GFX[\"misc\"][\"sidebargfx\"].subsurface(20, 160, 80, 100)\n icons = prepare.GFX[\"misc\"][\"icons\"]\n surface.blit(items, (ITEM_IMAGES[0],ITEM_IMAGES[1]+index*SLOT_SPACER))\n for i,stat in enumerate([\"money\", \"keys\"]):\n num = player_sprite.inventory[stat]\n pos_y = ITEM_START[1]+index*SLOT_SPACER+i*ITEM_SPACER\n rend_it = (SMALL_FONT, str(num), pg.Color(\"white\"), self.rendered)\n surface.blit(tools.get_rendered(*rend_it), (ITEM_START[0], pos_y))\n defense = str(player_sprite.defense)\n strength = str(player_sprite.strength)\n speed = \"{:.1f}\".format(player_sprite.speed*2)\n for i,stat in enumerate((strength,defense,speed)):\n pos = STAT_START[0]+STAT_SPACER*i, STAT_START[1]+SLOT_SPACER*index\n surface.blit(icons, pos, (34*i,0,34,34))\n rend_it = (SMALL_FONT, stat, pg.Color(\"white\"), self.rendered)\n rendered = tools.get_rendered(*rend_it)\n surface.blit(rendered, (pos[0]+STAT_TEXT_SPACE,pos[1]))", "def inventory(self):\n \n print(\"What do 
we got at this party?\")\n print(\"Attendees: {0}\".format(self.attendees))\n print(\"Attendees w/hats: {0}\".format(self.party_hatted_attendees))\n print(\"Beers: {0}\".format(self.beers))\n print(\"Sangrias: {0}\".format(self.sangrias))\n print(\"Wines: {0}\".format(self.wines))\n print(\"Lemonades: {0}\".format(self.lemonades))\n print(\"=\"*40)", "def display_equipped(self):\r\n \r\n #iterates through the equipped dictionary, displaying each\r\n #items stats in a neat fashion\r\n for slot, item in self._equipped.iteritems():\r\n \r\n if not item:\r\n \r\n print Slot(int(slot)).name + \": Empty\"\r\n\r\n continue\r\n \r\n item_data = item.get_bonuses()\r\n stats = \"\"\r\n\r\n #appends the stats string the bonuses the item provides\r\n if(conf.POWER_DATA in item_data.keys()):\r\n stats += \" Power: +\" + str(item_data[conf.POWER_DATA])\r\n if(conf.MAX_HEALTH_DATA in item_data.keys()):\r\n stats += \" Health: +\" + str(item_data[conf.MAX_HEALTH_DATA])\r\n if(conf.MAX_MANA_DATA in item_data.keys()):\r\n stats += \" Mana: +\" + str(item_data[conf.MAX_MANA_DATA])\r\n if(conf.SPEED_DATA in item_data.keys()):\r\n stats += \" Speed: +\" + str(item_data[conf.SPEED_DATA])\r\n \r\n print Slot(int(slot)).name + \": \" + str(item) + \\\r\n \" [\" + stats + \" ]\"", "def write_main(self, text, player_name=None, row=1, col=1):\n\t\tself.main_win.erase()\n\t\tif isinstance(text, list):\n\t\t\tfor line in text:\n\t\t\t\tif line == \" \": row += 1\n\t\t\t\tif player_name is not None: line = replace_player_name(line, player_name)\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow +=1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telif isinstance(text, basestring):\n\t\t\tif player_name is not None: text = replace_player_name(text, player_name)\n\t\t\tlines = textwrap.wrap(text, CHARS_PER_LINE)\n\t\t\tfor line in lines:\n\t\t\t\tself.main_win.addstr(row, col, line, curses.A_BOLD)\n\t\t\t\trow += 1\n\t\t\t\tif row >= MAIN_WIN_ROWS: break\n\t\telse:\n\t\t\tself.main_win.addstr('Error: did not receive list of strings or string')\n\t\tself.main_row = row", "def update_pos(self):\n for pid, lvl in self.positions.items():\n if self.windows[lvl + 1].has_pos(self.mc.entity.getPos(pid)):\n # Reached the next level (window)\n self.queues[lvl].put((Cmd.EXI, [pid]))\n lvl += 1\n self.positions[pid] += 1\n self.queues[lvl].put((Cmd.ENT, [pid]))\n print(f\" {self.players[pid]} ({pid}) reached {lvl}\")\n self.mc.postToChat(f\"{self.players[pid]} made it to Level {lvl}\")", "def update_game_stats(self):\r\n self.games_played +=1\r\n if player_won(self.board)==1:\r\n self.p1_wins +=1\r\n elif player_won(self.board)==2:\r\n self.p2_wins +=1\r\n elif player_won(self.board)==0 and board_full(self.board):\r\n self.games_tied +=1\r\n \r\n print \"Game Statistics:\"\r\n print \"-Total games played: \", self.games_played\r\n print \"-Number of games P1 won: \", self.p1_wins\r\n print \"-Number of games P2 won: \", self.p2_wins\r\n print \"-Number of games tied: \", self.games_tied\r\n print \"-Steps made by each player in most recent game:\"\r\n print self.last_game_record", "def _draw_abilities(vDict):\n windowDict = vDict['windowDict']\n invenWindow = windowDict['invenWindow']\n\n blankInvenText = vDict['blankInvenText']\n\n player = vDict['gameLevel'].player\n\n y = 1\n\n innate_abilities = player.species.innateAbilites\n\n abil_text = []\n\n check_color = lambda c, co1, co2: co1 if c else co2\n\n if len(innate_abilities) > 0:\n abil_text += [('Inate abilities:', WHITE)]\n abil_text += [(str(a), 
check_color(player.canUsePower(a), WHITE, PURPLE)) for a in innate_abilities]\n\n grimore = player.grimore\n\n if len(grimore) > 0:\n abil_text += [('Grimore:', WHITE)]\n abil_text += [(str(a), check_color(player.canUsePower(a), WHITE, PURPLE)) for a in grimore]\n\n for a in abil_text:\n invenWindow.draw_str(1, y, blankInvenText.format(blankInvenText.format(a[0])), a[1])\n\n tdl.flush()", "def writePhysical(self, fname):\n import os\n file = open(fname, 'a')\n file.write('// --- Wing physical groups ---\\n')\n file.write('Physical Surface(\"wing\") = {')\n for i in range(0, self.n-1):\n for j in range(0, 3):\n file.write('{0:d},'.format(self.surN[i][j]))\n file.seek(0, os.SEEK_END) # first seek end of file; f.seek(0, 2) is legal\n file.seek(file.tell() - 1, os.SEEK_SET) # then go backward\n file.truncate()\n file.write('};\\n')\n file.write('Physical Surface(\"wing_\") = {')\n for i in range(0, self.n-1):\n for j in range(3, 6):\n file.write('{0:d},'.format(self.surN[i][j]))\n file.seek(0, os.SEEK_END)\n file.seek(file.tell() - 1, os.SEEK_SET)\n file.truncate()\n file.write('};\\n')\n file.write('\\n')\n file.close()", "def render_inventory(panel, inventory):\n # Draw \"INVENTORY\" directly above the inventory\n tcod.console_print_ex(panel, 70, 1, tcod.BKGND_NONE, tcod.LEFT, \"INVENTORY:\")\n\n # Render each item's symbol\n dx = 0\n for item in inventory:\n if item is None:\n tcod.console_print_ex(panel, 70 + dx, 2, tcod.BKGND_NONE, tcod.LEFT, \" | \")\n else:\n tcod.console_print_ex(panel, 70 + dx, 2, tcod.BKGND_NONE, tcod.LEFT, item.char + \" | \")\n dx += 4" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
appends to what is currently in the stat window. This function is not currently called anywhere
def write_stat_append(self, text): row = self.stat_row lines = textwrap.wrap(text, 26) for line in lines: self.stat_win.addstr(row, ui.COL, line, curses.color_pair(3)) row += 1 if row >= STAT_WIN_ROWS: self.stat_win.refresh() break self.stat_win.refresh()
[ "def write_stat(self, text):\n\t\tself.stat_win.erase()\n\t\trow = 1\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tline = line[:STAT_WIN_COLS-1]\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(2))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()\n\t\tself.stat_row = row", "def update_window(self):\n if self.window is not None:\n # First, clean-up\n actr.clear_exp_window()\n\n # Then, add new elements\n if self.phase == \"fixation\":\n item = actr.add_text_to_exp_window(self.window, \"+\",\n x = 400, y = 300,\n color = \"black\")\n \n elif self.phase == \"stimulus\":\n color = self.current_trial.color\n word = self.current_trial.word\n item = actr.add_text_to_exp_window(self.window, word,\n x=395, y= 300,\n color = color)\n\n for i, col in enumerate(COLOR_MAPPINGS):\n item = actr.add_text_to_exp_window(self.window,\n COLOR_MAPPINGS[col],\n x = 600 + i * 50,\n y = 500,\n color = col)\n print(type(COLOR_MAPPINGS))\n\n elif self.phase == \"done\":\n color = self.current_trial.color\n word = self.current_trial.word\n item = actr.add_text_to_exp_window(self.window, \"done\",\n x=395, y= 300,\n color = \"black\")", "def append(self, plot):\n super().append(plot)", "def update_aux_display(self):\n pass", "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()", "def glyphWindowOpenCB(self, info):\n glyphWindow = info[\"window\"]\n self.guideStatus.addViewToWindow(glyphWindow)", "def update_history(self):\n self.SetPoint.Clear() # clears menu\n choices = []\n for T in sorted(set(self.history)): choices += [str(T)]\n self.SetPoint.AppendItems(choices)", "def say(self, thing_to_say):\n\n self.append(thing_to_say)\n display.refresh_screen()", "def plusBtnClicked(self, stat):\n if self.updatedStats.skillPts == 0:\n return # no skill points available\n \n values = {'hp':5, 'mana':5, 'stamina':5,'strength':0.1, 'atkSpeed':0.05}\n self.updatedStats.__dict__[stat] += values[stat] # increment the selected stat\n self.usedPts[stat] += 1 # skill point used\n self.updatedStats.skillPts -= 1 # remove one from availabe\n self.updateStatsUI()", "def open_stat(self):\r\n hp = f\"HP: {self.player.hp}/{self.player.max_hp}\".ljust(10)\r\n lv = f\"LV: {self.player.lv}\".ljust(10)\r\n name = self.player.name\r\n layout = [\r\n [sg.Text(f'\"{name}\"')],\r\n [sg.Text(lv)],\r\n [sg.Text(hp)],\r\n [sg.Button(\"Exit\", size=(10, 1), button_color=(\"#edf2ce\", \"#444444\"))]\r\n ]\r\n window = sg.Window(\"Stats\", layout, size=(250, 500), element_justification='c')\r\n window.read()\r\n window.close()", "def merge_next(self):\r\n \r\n if self.current_window == data.windows.__len__()-1 :\r\n self.gui.addHelpMessage(\"Can't merge the last window with a following window.\")\r\n else:\r\n self.merge(self.current_window,self.current_window+1)", "def _update_w(self):\n pass", "def appendString(self, fs):\n if self.lastTextBox is None:\n self.newTextBox(fs) # Also sets self.lastTextBox \n else:\n self.lastTextBox.appendString(fs)", "def append_text(self, text: str, color: QColor=QColor(0, 0, 0)) -> None:\n\n self.gui.textWindow.setTextColor(color)\n self.gui.textWindow.append(text)\n maxVal = self.gui.textWindow.verticalScrollBar().maximum()\n 
self.gui.textWindow.verticalScrollBar().setValue(maxVal)", "def refresh(self):\n self.parent.refreshOnScreenInfo()", "def refresh_display(self):\n print('---'*20)\n print(\"Champion \" + self.name)\n print('Health: {0} Stamina: {1}'.format(round(self.current_health,2),\n round(self.current_stamina,2)))", "def add_entry_window(self):\n add_entry_window = tk.Toplevel(self.root)\n add_entry_widget = AddModification(path=self.parent.app_project.project.path,callback=self.add_item,root=add_entry_window)\n add_entry_window.transient(self.root)", "def updateStatsUI(self):\n self.hpLabel.set_text(str(self.updatedStats.hp))\n self.manaLabel.set_text(str(self.updatedStats.mana))\n self.staminaLabel.set_text(str(self.updatedStats.stamina))\n self.strLabel.set_text(\"{:.2f}\".format(self.updatedStats.strength))\n self.speedLabel.set_text(\"{:.2f}\".format(self.updatedStats.atkSpeed))\n self.skillLabel.set_text(str(self.updatedStats.skillPts))", "def add_extra_info(self, message):\n with console_lock:\n self.extra_info.append(message)\n if self.parent is not None:\n self.parent.update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes a list or string to the time window; stops when we get to the bottom of the window
def write_time(self, text):
    self.time_win.erase()
    row = 1
    for line in text:
        self.time_win.addstr(row, ui.COL, line, curses.color_pair(4))
        row += 1
        if row >= TIME_WIN_ROWS:
            break
[ "def test_sliding_time_window(self):\n dst = \"ngc5921.split.sliding_time_window.ms\"\n ref = 'ngc5921_statwt_ref_test_sliding_time_window.ms'\n timebin = \"300s\"\n \"\"\"\n row_to_rows = []\n row_to_rows.append([0, 6])\n row_to_rows.append([0, 7])\n row_to_rows.append([0, 8])\n row_to_rows.append([0, 9])\n row_to_rows.append([0, 9])\n row_to_rows.append([0, 10])\n row_to_rows.append([1, 12])\n row_to_rows.append([2, 12])\n row_to_rows.append([3, 12])\n row_to_rows.append([5, 12])\n row_to_rows.append([6, 12])\n row_to_rows.append([6, 12])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([17, 20])\n row_to_rows.append([17, 21])\n row_to_rows.append([17, 22])\n row_to_rows.append([18, 23])\n row_to_rows.append([19, 24])\n row_to_rows.append([20, 25])\n row_to_rows.append([21, 26])\n row_to_rows.append([22, 27])\n row_to_rows.append([23, 28])\n row_to_rows.append([24, 29])\n row_to_rows.append([25, 30])\n row_to_rows.append([26, 31])\n row_to_rows.append([27, 32])\n row_to_rows.append([28, 33])\n row_to_rows.append([29, 33])\n row_to_rows.append([30, 33])\n row_to_rows.append([33, 35])\n row_to_rows.append([33, 35])\n row_to_rows.append([35, 38])\n row_to_rows.append([35, 38])\n row_to_rows.append([35, 38])\n row_to_rows.append([38, 41])\n row_to_rows.append([38, 42])\n row_to_rows.append([38, 43])\n row_to_rows.append([39, 44])\n row_to_rows.append([40, 45])\n row_to_rows.append([41, 46])\n row_to_rows.append([42, 47])\n row_to_rows.append([43, 48])\n row_to_rows.append([44, 49])\n row_to_rows.append([45, 50])\n row_to_rows.append([46, 51])\n row_to_rows.append([47, 52])\n row_to_rows.append([48, 53])\n row_to_rows.append([49, 54])\n row_to_rows.append([50, 55])\n row_to_rows.append([51, 56])\n row_to_rows.append([52, 56])\n row_to_rows.append([53, 56])\n row_to_rows.append([56, 60])\n row_to_rows.append([56, 60])\n row_to_rows.append([56, 60])\n row_to_rows.append([56, 60])\n \"\"\"\n shutil.copytree(src, dst)\n myms.open(dst, nomodify=False)\n myms.statwt(timebin=timebin, slidetimebin=True)\n myms.done()\n # self._check_weights(\n # dst, row_to_rows, 'c', None, False, None, None\n # )\n self.compare(dst, ref)\n shutil.rmtree(dst)", "def add_to_events_to_draw(vDict, textEvent):\n\n windowDict = vDict['windowDict']\n\n textWindow = windowDict['textWindow']\n\n blankEventText = vDict['blankEventText']\n\n windowDict = vDict['windowDict']\n\n textWindow = windowDict['textWindow']\n\n lastEvent = vDict['eventsToPrint'][-1]\n\n match = textEvent == lastEvent.rstrip(' ')\n\n if match:\n # if textEvent equals the last line in the eventsToPrint list\n vDict['eventsToPrint'][-1] = '{} <x{}>'.format(textEvent, '2')\n elif vDict['eventsToPrint'][-1].startswith(textEvent):\n # elif the last line in the eventsToPrint starts with textEvent\n st = vDict['eventsToPrint'][-1].split(' <x')\n try:\n st1, st2 = st[0], int(st[1].strip('>'))\n vDict['eventsToPrint'][-1] = '{} <x{}>'.format(textEvent, st2 + 1)\n except IndexError:\n print('Index error')\n print(st)\n else:\n vDict['eventsToPrint'].popleft()\n vDict['eventsToPrint'].append(blankEventText.format(textEvent))\n #l = vDict['eventsToPrint'].pop(0)\n #vDict['eventsToPrint'].append(blankEventText.format(textEvent))\n\n for t in range(vDict['INFO_WINDOW_HEIGHT'] - 2):\n try:\n textWindow.draw_str(1, 1 + t, blankEventText)\n events_print = vDict['eventsToPrint'][t]\n textWindow.draw_str(1, 1 + t, events_print)\n except 
tdl.TDLError:\n pass\n\n # tdl.flush()", "def time_window_end(self, time_window_end):\n\n self._time_window_end = time_window_end", "def bigTimeView(self):\n\n now=time.localtime()\n hrs=int(time.strftime(\"%H\"))\n minutes=int(time.strftime(\"%M\"))\n sec=int(time.strftime(\"%S\"))\n \n # Build string representing top and bottom rows\n L1=\"0\"+str(digits[hrs][0]).zfill(5)+str(digits[minutes][0]).zfill(5)+str(digits[sec][0]).zfill(5)\n L2=\"0\"+str(digits[hrs][1]).zfill(5)+str(digits[minutes][1]).zfill(5)+str(digits[sec][1]).zfill(5)\n \n # Convert strings from digits into pointers to custom character\n i=0\n XL1=\"\"\n XL2=\"\"\n while i < len(L1):\n XL1=XL1+chr(int(L1[i]))\n XL2=XL2+chr(int(L2[i]))\n i += 1\n \n self.writeLCD(XL1+\"\\n\" +XL2)", "def _flush_frame(logs, output):\n for timestamp in sorted(logs):\n entries = logs[timestamp]\n (level, color, pkrid, process, source, logger, log) = entries[0]\n try:\n lcolor = LEVEL_COLORS[level]\n except KeyError:\n lcolor = LEVEL_COLORS['E']\n lcolor = 16 + 36 * lcolor[0] + 6 * lcolor[1] + lcolor[2]\n color = 16 + 36 * color[0] + 6 * color[1] + color[2]\n # print the first line with the timestamp\n output.write(\"\\033[38;5;%dm\" % lcolor)\n output.write(\"%s|\" % level)\n output.write(timestamp)\n output.write(\"|\\033[38;5;%dm%s:%s|%s|%s|%s\\033[39m\\n\"\n % (color, pkrid, process, source, logger, log))\n dots = \".\" * len(timestamp)\n\n # then print all remaining lines (for the considered timestamp)\n for (level, color, pkrid, process, source, logger, log) in entries[1:]:\n lcolor = LEVEL_COLORS[level]\n lcolor = 16 + 36 * lcolor[0] + 6 * lcolor[1] + lcolor[2]\n output.write(\"\\033[38;5;%dm\" % lcolor)\n output.write(\"%s|%s\" % (level, dots))\n output.write(\"|\\033[38;5;%sm%s:%s|%s|%s|%s\\033[39m\\n\"\n % (color, pkrid, process, source, logger, log))", "def write_time(self):\n time_now = str(datetime.now())\n loc = (self.WIDTH - 4*len(time_now), self.HEIGHT - 10)\n\n cv2.putText(self.img_data, time_now, loc, self.FONT, 0.24, self.RED, 1, cv2.LINE_AA)\n return None", "def write_stat_append(self, text):\n\t\trow = self.stat_row\n\t\tlines = textwrap.wrap(text, 26)\n\t\tfor line in lines:\n\t\t\tself.stat_win.addstr(row, ui.COL, line, curses.color_pair(3))\n\t\t\trow += 1\n\t\t\tif row >= STAT_WIN_ROWS: \n\t\t\t\tself.stat_win.refresh()\n\t\t\t\tbreak\n\t\tself.stat_win.refresh()", "def listQueue(self):\n\n queue = self.library.list_tracks()\n if len(queue) > 0:\n listWin = curses.newwin(len(queue), 40, 5, 50)\n for i in range(len(queue)):\n listWin.addstr(i, 0, queue[i])\n self.stdscr.refresh()\n curses.echo()\n listWin.getch()\n curses.noecho()\n del listWin\n self.stdscr.touchwin()\n self.stdscr.refresh()\n else:\n self.printError('Nothing to list')", "def make_time_windows(dataset, w):\n num_participants, full_length, _ = np.shape(dataset)\n time_windows = []\n\n for i in list(range(num_participants)): # i = participant's position in dataset\n\n for j in list(range(full_length-w+1)): # j = row number of first row in window\n time_windows.append(dataset[i,j:j+w,:])\n\n return np.stack(time_windows)", "def draw_time_left(self, text, value):\n\n self.draw_filled_rect(64, 0, 127, 31, self.training_back)\n self.draw_filled_rect(64, 0, 64+value, 31, self.training_bar)\n self.graphics.DrawText(self.canvas, self.font, 64+2, 12, self.training_text, text)\n self.graphics.DrawText(self.canvas, self.font, 64+2, 31, self.training_text, str(value))", "def list_maxtime(self, list_maxtime):\n if (type(list_length) == type(120000)) and (list_length 
> 0) :\n out = \"t{}\\n\".format(int(list_maxtime))\n self.transfer_ESP32(out)\n else:\n print(\"max time must be an integer larger than zero\")", "def window(self, created_at):\n counter = 0\n while len(self.tweets):\n (t, edgelist) = self.tweets[0]\n if(created_at - t) <= 60:\n break\n self.remove(edgelist) # Time delta is greater than 60 seconds\n self.tweets.popleft()", "def time_track_print():\n\tglobal _time_track_dict\n#\tif not _time_track_dict.values(): return\n\tmax_time = max(_time_track_dict.values())\n\ttupel_list = [(fn_name, \"%.2f%%\" % (100*exe_time/max_time), \"%fs\" % exe_time) for (fn_name, exe_time) in sorted(_time_track_dict.items(), key=operator.itemgetter(1), reverse=True)]\n\tmax_len_item_1 = max([len(x) for (x,_,_) in tupel_list])\n\tmax_len_item_2 = max([len(x) for (_,x,_) in tupel_list])\n\tmax_len_item_3 = max([len(x) for (_,_,x) in tupel_list])\n\tfor (x,y,z) in tupel_list:\n\t\tprint x.ljust(max_len_item_1 + 3), y.rjust(max_len_item_2), z.rjust(max_len_item_3 + 3)", "def words_per_minute(data, pause_thresh=10**10, window=10**10):\r\n\r\n pause_thresh = pause_thresh / 10**9\r\n window = window / 10**9\r\n # windows per minute\r\n win_per_min = 60 / window\r\n\r\n # print(data)\r\n\r\n all_words = data[-1][\"words\"]\r\n # for item in data:\r\n # all_words += item[\"words\"]\r\n\r\n all_speakers = set([w[\"speakerTag\"] for w in all_words])\r\n out = {}\r\n for s in all_speakers:\r\n words = sorted([w for w in all_words if w[\"speakerTag\"] == s],\r\n key=lambda x: x[\"startTimeSeconds\"] + x[\"startTimeNanos\"] / 10**9)\r\n out[s] = []\r\n queue = [words[0]]\r\n for i in range(1, len(words)):\r\n next_word = words[i]\r\n word_time = sec_from_word(next_word)\r\n time_diff = word_time - sec_from_word(queue[0])\r\n while time_diff > window and len(queue) != 0:\r\n if len(queue) == 1:\r\n out[s].append([sec_from_word(queue[0])+window, 0])\r\n queue = queue[1:]\r\n if len(queue) == 0:\r\n break\r\n time_diff = word_time - sec_from_word(queue[0])\r\n queue.append(next_word)\r\n out[s].append([word_time, len(queue) * win_per_min])\r\n\r\n return out", "def write(self, idle_ms, window=None):\n now = datetime.datetime.now()\n loadavg = ','.join(str(l) for l in os.getloadavg())\n win_types, win_name = window or (None, None)\n type_str = ','.join(str(win_type) for win_type in (win_types or []))\n self.logger.info('%s %d %s %s %s',\n now, idle_ms, loadavg, type_str, win_name or '')", "def appendmessages(self, name, msg):\r\n \r\n time = strftime(\"%H:%M\")\r\n return(time+ ' ' + name + ': ' + msg)", "def update():\n seconds = 0 if self.start_time == 0 else round(time.time() - self.start_time)\n hours = seconds // 3600\n seconds = seconds % 3600\n minutes = seconds // 60\n seconds = seconds % 60\n cur_time = \"\"\n if hours < 10:\n cur_time += \"0\" + str(hours) + \":\"\n else:\n cur_time += str(hours) + \":\"\n if minutes < 10:\n cur_time += \"0\" + str(minutes) + \":\"\n else:\n cur_time += str(minutes) + \":\"\n if seconds < 10:\n cur_time += \"0\" + str(seconds)\n else:\n cur_time += str(seconds)\n\n self.formatted_time.set(cur_time)\n self.last_after = self.root.after(200, update)", "def move_sleepMem(self, window_size):\r\n to_sleep = np.where((self.t - np.array([d['t'] for d in self.model])) > window_size)[0]\r\n if len(to_sleep)>0:\r\n self.sleep_mem += list(self.model[i] for i in to_sleep)\r\n for i in reversed(to_sleep):\r\n self.model.pop(i)", "def print_end(self):\n self.time_writer('Time at the end of the Spider: %s'\n % str(datetime.now()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
refreshes all screens except the back window
def refresh_all(self):
    self.stat_win.refresh()
    self.input_win.refresh()
    self.time_win.refresh()
    self.main_win.refresh()
[ "def refresh(self):\n\n for win in self.get_window():\n win.refresh()\n self.scr.refresh()", "def _refresh_all(self) -> None:\n self._window_all.refresh()", "def back_to_home_gui(self):\n self.forget_non_home_gui()\n self.seeds_path.set(\"\")\n self.initilize_gui()", "def refresh(self):\n self.parent.refreshOnScreenInfo()", "def back_main_button(self) -> None:\n self.window.destroy()\n MainMenu.execute()", "def back_button_handler(self):\n self.init_window()\n self.set_background()\n self.create_welcome_window()", "def checkScreen(self):\r\n if not window.screen == self.screen:\r\n window.setScreen(self.screen)", "def refresh_display(screen_def):\n try:\n screen_def['hlist'].delete_all()\n except:\n pass\n if debug():\n logger.debug(\"query=\",screen_def['query']) \n displayed_query = display_window(screen_def)", "def to_prev_screen(self) -> None:\n if self.game_mode == 'comp' and self.num_players == 2:\n self.reset_num_screen()\n self.parent.current = 'menu'\n elif self.game_mode == 'game' or (self.game_mode == 'comp' and self.num_players > 2):\n self.reset_num_screen()\n self.parent.current = 'number'\n elif self.game_mode == 'solo':\n self.reset_goal_screen()\n self.parent.current = 'goal'\n self.clear_widgets(self.children[:-2])", "def refresh_home(self):\r\n\t\tself.home.set_view(self._app_data.users.my_jobs_at_a_glance)", "def flush(self):\r\n # Draw the screen to the window\r\n self._window.blit(self._screen, (UI_PANEL_WIDTH, 0))\r\n # Draw the window to the screen\r\n self._master_screen.blit(\r\n pygame.transform.scale(self._window, self._master_screen.get_rect().size),\r\n (0, 0),\r\n )\r\n # Carry out the buffered draw calls\r\n if len(self._hd_draws) > 0:\r\n self._master_screen.blits(self._hd_draws)\r\n self._hd_draws.clear()\r\n # Update the display window\r\n pygame.display.update()", "def reset_main_menu_interface():\n screen_reset()\n print_main_interface()", "def leave(self, *args: list) -> None:\n self.dismiss()\n screen_manager = self.parent.children[1]\n results = next(screen for screen in screen_manager.screens if screen.name == 'results')\n\n for screen in screen_manager.screens:\n if screen.name == 'number':\n results.reset_num_screen(screen)\n\n if screen.name == 'name':\n results.reset_name_screen(screen)\n\n if screen.name == 'game':\n results.reset_game_screen(screen, play_again=False)\n\n if screen.name == 'goal':\n results.reset_goal_screen(screen)\n\n if screen.name == 'solo':\n results.reset_solo_screen(screen, play_again=False)\n\n screen_manager.current = 'menu'", "def clear_old_scenes(self): \n pass", "def refreshClick():\n global games\n games = buildGamesMenu()", "def reset_play_game_mode_interface():\n screen_reset()\n print_play_game_header()\n print_play_game_menu()", "def clear_screen():\n\tos.system('cls')", "def _quit_fscreen(self, event):\n self.fs = False\n self.window.attributes(\"-fullscreen\", self.fs)", "def reset_login_screen(self):\n if self.root.ids.id_newidentity.ids.add_random_bx.children:\n self.root.ids.id_newidentity.ids.add_random_bx.clear_widgets()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
validates that the terminal is a large enough size to play the game in curses
def terminal_size(self):
    # rows, columns = os.popen('stty size', 'r').read().split()
    try:
        rows, columns = subprocess.check_output(['stty','size']).decode().split()
        if int(rows) >= int(MIN_ROWS) and int(columns) >= int(MIN_COLS):
            return True
        return False
    except Exception:
        return False
[ "def screenSizeAdjust(): \n cls()\n printNow(\"RESIZE JES COMMAND WINDOW TO HERE\\n\" + '-'*70 + '\\n'* 18 +\n \"For the best experience, please resize the JES command window so \" +\n \"that the message\\n'RESIZE JES COMMAND WINDOW TO HERE' above \" +\n \"is visible on your screen.\\n\\n\" + \n \"Press ENTER when ready to begin or 'q' to quit ...\")\n resp = raw_input()\n if resp and resp[0].lower() == 'q':\n return False\n else:\n return True", "def check_size(m, n):\n\n if sys.platform.startswith('freebsd') or sys.platform == 'linux' or sys.platform == 'darwin':\n\n stty = subprocess.run([\"stty\", \"size\"], stdout=subprocess.PIPE)\n\n height, width = stty.stdout.strip().split(b' ')\n height, width = int(height), int(width)\n else:\n height, width = 25, 80\n\n warn = \"Size of cell field bigger than terminal size. Can't display. Choose {} <= {}\"\n\n if m <= 0 or n <= 0:\n return f\"M,N must be > 0 and be smaller than terminal dimensions({height}x{width})\"\n\n elif height < m:\n return warn.format(\"m\", height)\n\n elif width < n:\n return warn.format(\"n\", width)\n\n else:\n return True", "def checkReq():\n global MAX_Y, MAX_X\n MAX_Y = curses.LINES - 1\n MAX_X = curses.COLS - 1\n if MAX_Y < REQ_Y:\n printError(\"Resize your window. \" +\n \"Current: \" + str(MAX_X) + \" X \" + str(MAX_Y) + \". \"\n \"Required: \" + str(REQ_X) + \" X \" + str(REQ_Y) )", "def _force_minimum_size() -> None:\n\n # As of 2020, this worked in both the standard Apple terminal and Debian xterm.\n #\n # In 2022, it no longer works in Debian. Even in the Apple terminal, it only\n # works if the terminal font and monitor actually allow the requested size, and\n # there's no indication whether it worked or not.\n #\n # I'm apparently using a slightly larger font now than when I originally wrote\n # this code, and these days my terminal can't successfully resize past 155x59 on\n # my Macbook. The original rendering needed at least 155x70. 
To deal with this,\n # I added the TerminalSizeError error handling block (above) to explictly detect\n # that it isn't possible to render the board, and I also adjusted the rendering\n # to work in a slightly smaller terminal.\n #\n # See: https://apple.stackexchange.com/a/47841/249172\n\n print(\"\\u001b[8;%d;%dt\" % (_MIN_ROWS, _MIN_COLS))\n sleep(0.5) # wait for the window to finish resizing; if we try to render before it's done, the window gets hosed up", "def set_size_from_terminal(self):\n self.rows, self.columns = os.popen('stty size', 'r').read().split()\n self.width = int(self.columns)\n self.height = int(self.rows) - 2", "def terminal_size():\n cols, rows = get_terminal_size()\n ratio = theme.BOUNDS / theme.WIDTH\n \n theme.WIDTH = cols\n theme.BOUNDS = theme.WIDTH - int(theme.WIDTH * ratio)\n \n if cols < theme.BOUNDS:\n # 14 = amount of constant space taken by progress bar\n theme.PROGRESS = abs(cols - 14)", "def size(self, size):\n n_lines, n_cols = size\n getmaxyx = YX(*self.tui.stdscr.getmaxyx())\n if n_lines is None:\n n_lines = getmaxyx.y - self.start.y\n if n_cols is None:\n n_cols = getmaxyx.x - self.start.x\n self.win.resize(n_lines, n_cols)", "def _ch_resize(self):\n self.main(self.stdscr)", "def resize_console(rows, cols):\r\n\r\n if cols < 32:\r\n cols = 32\r\n\r\n if sys.platform.startswith('win'):\r\n command = \"mode con: cols={0} lines={1}\".format(cols + cols, rows + 5)\r\n os.system(command)\r\n elif sys.platform.startswith('linux'):\r\n command = \"\\x1b[8;{rows};{cols}t\".format(rows=rows + 3, cols=cols + cols)\r\n sys.stdout.write(command)\r\n else:\r\n print(\"Unable to resize terminal. Your operating system is not supported.\\n\\r\")", "def change_font_size_in_terminal(should_be_big):\n\n run_applescript('tell application \"Terminal\" to ' \\\n 'set font size of current settings of front window to '\n '{0}'.format(BIG_FONT if should_be_big else SMALL_FONT))", "def check_boardsize():\n return BOARD_SIZE % 2 == 0", "def test_console_width_is_positive():\n assert console.columns() > 0", "def get_scr_size():\n rows, cols = os.popen('stty size', 'r').read().split()\n # these come as strings so we cast them to int\n return (int(rows), int(cols))", "def assert_window_size(win_size):\n assert win_size >= 3, 'ERROR: win size must be at least 3'\n\n if win_size % 2 == 0:\n print ('It is highly recommended to user odd window sizes. You provided %s, an even number.' 
% (win_size, ))", "def test_console_height_is_positive():\n assert console.lines() > 0", "def __do_step_choose_board_size(self):\r\n params = self._prepare_values_to_be_rendered()\r\n params.instruction = \"Choose the size of the game board!\"\r\n if self._state.game.engine_choice == Games.ALQUERQUE:\r\n params.options.update({\r\n \"5\": \"5x5\",\r\n \"7\": \"7x7\"\r\n })\r\n elif self._state.game.engine_choice == Games.BAUERNSCHACH:\r\n params.options.update({\r\n \"4\": \"4x4\",\r\n \"5\": \"5x5\",\r\n \"6\": \"6x6\",\r\n \"7\": \"7x7\",\r\n \"8\": \"8x8\"\r\n })\r\n self._gui.print_screen(params)\r\n\r\n input = self._read_input()\r\n\r\n if not self._handle_common_inputs(input, params.options):\r\n if int(input) in range(4, 9):\r\n self._state.game.board_size = int(input)\r\n self._state.feedback = \"You have chosen a board of size \" \\\r\n + input + \".\"\r\n self._state.activity = States.CHOOSE_OPP", "def check_channel_pty_request(self, channel, term, width, height,\n pixelwidth, pixelheight, modes):\n return True", "def get_terminal_width(self):\n width = 60 # Use this as a minimum\n try:\n size = os.get_terminal_size()\n except OSError:\n size = None\n if size and size[0] > width:\n width = size[0]\n if os.name == 'nt':\n width -= 1 # Windows needs 1 empty space for newline\n return width", "def is_levelup_screen(self):\n # This is implemented as reading some text on the screen instead of\n # using get_text() because checking every loop is really slow.\n\n address = 0xc50f\n values = [146, 143, 130, 139]\n\n for (index, value) in enumerate(values):\n if self.emulator.vba.read_memory_at(address + index) != value:\n return False\n else:\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initializes some color pairs for curses to be used when printing text
def init_colors(self):
    curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
    curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    curses.init_pair(5, curses.COLOR_CYAN, curses.COLOR_BLACK)
[ "def __init__(self, stdscr, pair_number, fg_color, bg_color = -1):\n self.pair_number = pair_number\n curses.init_pair(pair_number, fg_color, bg_color)\n self.stdscr = stdscr", "def init_colors():\n curses.initscr()\n curses.start_color()\n curses.use_default_colors()\n # default 8 colors of terminal\n curses.init_pair(1, curses.COLOR_WHITE, -1)\n curses.init_pair(2, curses.COLOR_BLUE, -1)\n curses.init_pair(3, curses.COLOR_CYAN, -1)\n curses.init_pair(4, curses.COLOR_GREEN, -1)\n curses.init_pair(5, curses.COLOR_MAGENTA, -1)\n curses.init_pair(6, curses.COLOR_RED, -1)\n curses.init_pair(7, curses.COLOR_YELLOW, -1)\n global_vars.colors = {\n 'white': curses.color_pair(1),\n 'blue': curses.color_pair(2),\n 'cyan': curses.color_pair(3),\n 'green': curses.color_pair(4),\n 'magenta': curses.color_pair(5),\n 'red': curses.color_pair(6),\n 'yellow': curses.color_pair(7),\n }\n global_vars.color_names = list(global_vars.colors.keys())", "def _setup_colors(\n self,\n ) -> None:\n pass", "def __init__(self):\n if sys.stdout.isatty():\n self.HEADER = '\\033[95m'\n self.OKBLUE = '\\033[94m'\n self.OKGREEN = '\\033[92m'\n self.WARNING = '\\033[93m'\n self.FAIL = '\\033[91m'\n self.ENDC = '\\033[0m'\n self.BOLD = '\\033[1m'\n self.UNDERLINE = '\\033[4m'", "def _cb_print_colors(ctx, param, value):\n\n if not value or ctx.resilient_parsing:\n return\n for color in gj2ascii.DEFAULT_COLOR_CHAR.keys():\n click.echo(color)\n ctx.exit()", "def set_background_colors(self) -> None:\n self._window_all.bkgd(\" \", curses.color_pair(m_color_pair.ColorPair.BLACK_N_WHITE.value))", "def color_guide():\n print('\\n')\n print('\\u001b[1mStandard Colors\\u001b[0m\\n')\n for j in range(0, 8):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.center(8)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mHigh-Intensity Colors\\u001b[0m\\n')\n for j in range(8, 16):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.center(8)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mColors\\u001b[0m\\n')\n for m in range(0, 6):\n for n in range(0, 36):\n code = str(m * 36 + (n + 16))\n print(f\"\\u001b[48;5;{code}m {code.ljust(3)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mGrayscale colors\\u001b[0m\\n')\n for j in range(232, 256):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.ljust(5)}\", end='')\n print(\"\\u001b[0m\")", "def color(message='hello world'):\n while True:\n print(c.rc() + message + c.reset, end=\" \")", "def print_all_colors(): # pylint: disable=too-many-statements\n print('\\n\\t----------------------------')\n print('\\tPrinting All Color Functions')\n print('\\t----------------------------')\n\n print('\\tnormal: \\t\\t' + colors.normal('normal'))\n print('\\tunderline: \\t\\t' + colors.underline('underline'))\n print('\\tbold: \\t\\t\\t' + colors.bold('bold'))\n print('\\tblink: \\t\\t\\t' + colors.blink('blink'))\n print('\\trblink: \\t\\t' + colors.rblink('rblink'))\n print('\\treverse: \\t\\t' + colors.reverse('reverse'))\n print('\\tconceal: \\t\\t' + colors.conceal('conceal'))\n print('\\tblack: \\t\\t\\t' + colors.black('black'))\n print('\\tred: \\t\\t\\t' + colors.red('red'))\n print('\\tgreen: \\t\\t\\t' + colors.green('green'))\n print('\\tyellow: \\t\\t' + colors.yellow('yellow'))\n print('\\tblue: \\t\\t\\t' + colors.blue('blue'))\n print('\\tmagenta: \\t\\t' + colors.magenta('magenta'))\n print('\\tcyan: \\t\\t\\t' + colors.cyan('cyan'))\n print('\\twhite: \\t\\t\\t' + colors.white('white'))\n print('\\ton_black: 
\\t\\t' + colors.on_black('on_black'))\n print('\\ton_red: \\t\\t' + colors.on_red('on_red'))\n print('\\ton_green: \\t\\t' + colors.on_green('on_green'))\n print('\\ton_yellow: \\t\\t' + colors.on_yellow('on_yellow'))\n print('\\ton_blue: \\t\\t' + colors.on_blue('on_blue'))\n print('\\ton_magenta: \\t\\t' + colors.on_magenta('on_magenta'))\n print('\\ton_cyan: \\t\\t' + colors.on_cyan('on_cyan'))\n print('\\ton_white: \\t\\t' + colors.on_white('on_white'))\n print('\\tred_on_black: \\t\\t' + colors.red_on_black('red_on_black'))\n print('\\tgreen_on_black: \\t' + colors.green_on_black('green_on_black'))\n print('\\tyellow_on_black: \\t' + colors.yellow_on_black('yellow_on_black'))\n print('\\tblue_on_black: \\t\\t' + colors.blue_on_black('blue_on_black'))\n print('\\tmagenta_on_black: \\t' + colors.magenta_on_black('magenta_on_black'))\n print('\\tcyan_on_black: \\t\\t' + colors.cyan_on_black('cyan_on_black'))\n print('\\twhite_on_black: \\t' + colors.white_on_black('white_on_black'))\n print('\\tblack_on_red: \\t\\t' + colors.black_on_red('black_on_red'))\n print('\\tgreen_on_red: \\t\\t' + colors.green_on_red('green_on_red'))\n print('\\tyellow_on_red: \\t\\t' + colors.yellow_on_red('yellow_on_red'))\n print('\\tblue_on_red: \\t\\t' + colors.blue_on_red('blue_on_red'))\n print('\\tmagenta_on_red: \\t' + colors.magenta_on_red('magenta_on_red'))\n print('\\tcyan_on_red: \\t\\t' + colors.cyan_on_red('cyan_on_red'))\n print('\\twhite_on_red: \\t\\t' + colors.white_on_red('white_on_red'))\n print('\\tblack_on_green: \\t' + colors.black_on_green('black_on_green'))\n print('\\tred_on_green: \\t\\t' + colors.red_on_green('red_on_green'))\n print('\\tyellow_on_green: \\t' + colors.yellow_on_green('yellow_on_green'))\n print('\\tblue_on_green: \\t\\t' + colors.blue_on_green('blue_on_green'))\n print('\\tmagenta_on_green: \\t' + colors.magenta_on_green('magenta_on_green'))\n print('\\tcyan_on_green: \\t\\t' + colors.cyan_on_green('cyan_on_green'))\n print('\\twhite_on_green: \\t' + colors.white_on_green('white_on_green'))\n print('\\tblack_on_yellow: \\t' + colors.black_on_yellow('black_on_yellow'))\n print('\\tred_on_yellow: \\t\\t' + colors.red_on_yellow('red_on_yellow'))\n print('\\tgreen_on_yellow: \\t' + colors.green_on_yellow('green_on_yellow'))\n print('\\tblue_on_yellow: \\t' + colors.blue_on_yellow('blue_on_yellow'))\n print('\\tmagenta_on_yellow: \\t' + colors.magenta_on_yellow('magenta_on_yellow'))\n print('\\tcyan_on_yellow: \\t' + colors.cyan_on_yellow('cyan_on_yellow'))\n print('\\twhite_on_yellow: \\t' + colors.white_on_yellow('white_on_yellow'))\n print('\\tblack_on_blue: \\t\\t' + colors.black_on_blue('black_on_blue'))\n print('\\tred_on_blue: \\t\\t' + colors.red_on_blue('red_on_blue'))\n print('\\tgreen_on_blue: \\t\\t' + colors.green_on_blue('green_on_blue'))\n print('\\tyellow_on_blue: \\t' + colors.yellow_on_blue('yellow_on_blue'))\n print('\\tmagenta_on_blue: \\t' + colors.magenta_on_blue('magenta_on_blue'))\n print('\\tcyan_on_blue: \\t\\t' + colors.cyan_on_blue('cyan_on_blue'))\n print('\\twhite_on_blue: \\t\\t' + colors.white_on_blue('white_on_blue'))\n print('\\tblack_on_magenta: \\t' + colors.black_on_magenta('black_on_magenta'))\n print('\\tred_on_magenta: \\t' + colors.red_on_magenta('red_on_magenta'))\n print('\\tgreen_on_magenta: \\t' + colors.green_on_magenta('green_on_magenta'))\n print('\\tyellow_on_magenta: \\t' + colors.yellow_on_magenta('yellow_on_magenta'))\n print('\\tblue_on_magenta: \\t' + colors.blue_on_magenta('blue_on_magenta'))\n 
print('\\tcyan_on_magenta: \\t' + colors.cyan_on_magenta('cyan_on_magenta'))\n print('\\twhite_on_magenta: \\t' + colors.white_on_magenta('white_on_magenta'))\n print('\\tblack_on_cyan: \\t\\t' + colors.black_on_cyan('black_on_cyan'))\n print('\\tred_on_cyan: \\t\\t' + colors.red_on_cyan('red_on_cyan'))\n print('\\tgreen_on_cyan: \\t\\t' + colors.green_on_cyan('green_on_cyan'))\n print('\\tyellow_on_cyan: \\t' + colors.yellow_on_cyan('yellow_on_cyan'))\n print('\\tblue_on_cyan: \\t\\t' + colors.blue_on_cyan('blue_on_cyan'))\n print('\\tmagenta_on_cyan: \\t' + colors.magenta_on_cyan('magenta_on_cyan'))\n print('\\twhite_on_cyan: \\t\\t' + colors.white_on_cyan('white_on_cyan'))\n print('\\tblack_on_white: \\t' + colors.black_on_white('black_on_white'))\n print('\\tred_on_white: \\t\\t' + colors.red_on_white('red_on_white'))\n print('\\tgreen_on_white: \\t' + colors.green_on_white('green_on_white'))\n print('\\tyellow_on_white: \\t' + colors.yellow_on_white('yellow_on_white'))\n print('\\tblue_on_white: \\t\\t' + colors.blue_on_white('blue_on_white'))\n print('\\tmagenta_on_white: \\t' + colors.magenta_on_white('magenta_on_white'))\n print('\\tcyan_on_white: \\t\\t' + colors.cyan_on_white('cyan_on_white'))", "def with_color(s, colors):\n return colors + s + Color.END", "def opensignals_color_pallet():\n\n return COLOR_LIST.__next__()", "def __init__(self, colors: List[Tuple[str, int, Tuple[float, float, float]]]):\n self._swatch = {}\n for color in colors:\n c = ColorItem(color)\n self._swatch[c.index] = c", "def orig_colors(self):\n\n # Colors used\n# self.colorHome = (255, 0, 0) # Home is Red\n# self.colorAway = (0, 0, 255) # Away is Blue\n self.colorHome = (0, 153, 0) # Home is Green\n# self.colorHome = (111, 38, 61) # Home is Maroon\n# self.colorHome2 = (255, 184, 28) # Home 2nd is Gold\n# self.colorHome = (0, 119, 139) # Home is Teal\n # self.colorHome2 = (32, 23, 71) # Home 2nd is Dark Purp\n self.colorHome2 = (255, 255, 255) # Home 2nd is White\n\n# self.colorAway = (112, 47, 138) # Away is Purple\n# self.colorAway2 = (255, 199, 44) # Away 2nd is Yellow\n# self.colorAway = (255, 0, 0) # Away is Red\n# self.colorAway2 = (255, 255, 255) # Home 2nd is White\n self.colorAway = (36, 62, 144) # Away is Royal Blue\n self.colorAway2 = (255, 205, 52) # Away 2nd is Gold Yel\n\n self.colorPlay = (255, 106, 0) # Play is Orange\n self.colorText = (255, 255, 255) # Text is White\n self.bg_color = (230, 230, 230) # Background is Grey", "def _set_color_list(self):\n # list of tyle choices\n for idx in range(len(COLOR)):\n self.font_color.Append(COLOR[idx], idx)", "def DefaultColorCoding():\n print((\"\\033[49m \\033[39m \"), end=' ') #set to default color coding, suppress newline", "def color(c):\n c = c.lower()\n ansi = {\n 'black': '\\033[0;30m',\n 'darkred': '\\033[0;31m',\n 'darkgreen': '\\033[0;32m',\n 'darkyellow': '\\033[0;33m',\n 'darkblue': '\\033[0;34m',\n 'darkmagenta': '\\033[0;35m',\n 'darkcyan': '\\033[0;36m',\n 'gray': '\\033[0;37m',\n\n 'darkgray': '\\033[1;30m',\n 'red': '\\033[1;31m',\n 'green': '\\033[1;32m',\n 'yellow': '\\033[1;33m',\n 'blue': '\\033[1;34m',\n 'magenta': '\\033[1;35m',\n 'cyan': '\\033[1;36m',\n 'white': '\\033[1;37m',\n\n 'blackbg': '\\033[40m',\n 'redbg': '\\033[41m',\n 'greenbg': '\\033[42m',\n 'yellowbg': '\\033[43m',\n 'bluebg': '\\033[44m',\n 'magentabg': '\\033[45m',\n 'cyanbg': '\\033[46m',\n 'whitebg': '\\033[47m',\n\n 'reset': '\\033[0;0m',\n 'bold': '\\033[1m',\n 'reverse': '\\033[2m',\n 'underline': '\\033[4m',\n\n 'clear': '\\033[2J',\n # 'clearline': 
'\\033[K',\n 'clearline': '\\033[2K',\n # 'save': '\\033[s',\n # 'restore': '\\033[u',\n 'save': '\\0337',\n 'restore': '\\0338',\n 'linewrap': '\\033[7h',\n 'nolinewrap': '\\033[7l',\n\n 'up': '\\033[1A',\n 'down': '\\033[1B',\n 'right': '\\033[1C',\n 'left': '\\033[1D',\n\n 'default': '\\033[0;0m',\n }\n if c.lower() == 'list':\n return ansi\n if c not in ansi:\n return ansi[\"default\"]\n return ansi[c]", "def setcolors(inputSNe):\n\n cm = pl.get_cmap('nipy_spectral')\n Nsne = len(inputSNe)\n print('Number of input supernovae is ', Nsne)\n sncolors = [''] * Nsne\n for i in range(Nsne):\n sncolors[i] = (cm(1. * i / Nsne))\n sncolors = np.asarray(sncolors)\n\n np.random.seed(666)\n np.random.shuffle(sncolors)\n pkl.dump(sncolors, open(\"input/sncolors.pkl\", 'wb'))\n return (sncolors)", "def create_game_over_palette():\n game_over_palette = displayio.Palette(1)\n game_over_palette[0] = (200, 200, 200)\n\n return game_over_palette", "def draw_multicolor_square(t,sz):\r\n for i in [\"red\",\"purple\",\"hotpink\",\"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prints the actual help menu in curses in the main window
def print_help(self):
    self.main_win.erase()
    x, y = self.print_text(4,2,"Verb ", curses.A_BOLD)
    x, y = self.print_text(x,y,"::", curses.color_pair(2))
    x, y = self.print_text(x,y," Explanation of verb usage")
    for key in VERB_DICT:
        y += 2
        x = 4
        self.print_text(x,y,key, curses.A_BOLD)
        self.print_text(15,y,"::", curses.color_pair(2))
        self.print_text(19,y,VERB_DICT[key])
[ "def help_menu(self):\r\n self.game_help()\r\n title_screen()", "def printHelp():\n cls() # clear the screen\n printNow(help_msg_1)\n requestString(\"Press ENTER for the next page of help.\")\n cls()\n printNow(help_msg_2)", "def on_helpAboutMenuItem_activate(self,*args):\n print \"Help About\"", "def cb_help_main(self):\n self.update_help_window(UALIGN.helpstr_gui,\n title='uber_align_test.py: GUI help')", "def help_menu():\n print(\" Help Menu\")\n print(\"\"\"\n Movement: To move use simple commands you can say walk or\n move and a direction. i.e. 'move north' or 'go south'.\n\n Examine: To examine the area around you use the keyword\n examine or inspect and what ever you want to inspect.\n i.e. to look at the room use 'inspect room'.\n\n Items: Some rooms will have items that you can pick up.\n Use the keyword 'pick' to put an item into your inventory.\n i.e. 'pick up excaliber'.\n\n Help: If you need to be reminded of available actions\n while playing the game use the keyword 'help' to access\n the help menu.\n \"\"\")\n print(\"\")\n input(\"To return press enter.\")\n return True", "def help(self):\n\n self.help_wdw = HelpWindow()\n self.help_wdw.show()", "def OnMenuHelpHintsMenu(self, event):\r\n \r\n Terminal.Feed(\"help\")\r\n # event.Skip()\r", "def commands():\r\n print(\"HELP: Display the commands list\"\r\n \"\\nGO (area): Move to the inputted area\"\r\n \"\\nLOOK AROUND: Look around your current area\"\r\n \"\\nCOLLECT (item): Add an item to your collection.\"\r\n \"\\nUSE (item): Put an item to good use.\"\r\n \"\\nCOLLECTION: Take a look at your collection.\"\r\n \"\\nSEARCH (place): Take a thorough search of a specific place\"\r\n \"\\nLOCATION: Gives you the name of your current location.\"\r\n \"\\nPARTY: Gives you a look at your current party and their status\")", "def _help(self):\r\n helpWindow = Toplevel()\r\n rules = Text(helpWindow)\r\n rules.insert(END, HELP_MESSAGE)\r\n rules.grid(row=1, column=0)\r\n rules.config(state=DISABLED, width=100)\r\n rules.tag_add(\"title\", 0.0, 2.0)\r\n rules.tag_config(\"title\", justify=CENTER, font=TITLE_FONT)\r\n rules.tag_add(\"content\", 2.0, END)\r\n rules.tag_config(\"content\", font=GENERAL_TEXT_FONT)", "def display_help_about():\n showinfo(\"Help about.\", \"Password checker version 1.1\")", "def showhelp():\n\tusage()", "def help_dialog(parent_window):\n tkMessageBox.showinfo(\"Help\", \"Not implemented, check back later.\")", "def help(self, dummy):\n doc = self.doc\n if not doc:\n doc = \"No help available.\"\n elif doc.find(\"%s\") > 0:\n doc = doc.replace(\"%s\", self.progname)\n print(doc, end='')\n sys.exit(0)", "def main():\n menu()", "def show_help_menu(self):\n return self._show_help_menu", "def calculator_help():\n print(\"HELP\")\n print_options()", "def help_bye(self):\n bye.SppBye.help()", "def about():\n\tclick.echo('\\n')\n\tf = Figlet(font='slant')\n\tprint(f.renderText('ENALP CLI'))\n\tclick.secho(\"ENALP CLI: Easy NAtural Language Processing CLI\",fg='cyan')\n\tclick.secho(\"By: Rosario Moscato\",fg='white')\n\tclick.secho(\"mailto: rosario.moscato@outlook.com\",fg='cyan')\n\tclick.secho(\"https://www.linkedin.com/in/rosariomoscato/\",fg='white')\n\tclick.echo('\\n')", "def __init__( self, wintitle, width, height ):\n print( self.helpMessage )\n #self.win = GraphWin( wintitle, width, height ) \n #MenuMessage( self.helpMessage, 0, 80, self.win )\n #MenuMessage( self.closeMessage, .8 * height, 20, self.win )\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prints the final credits in curses in the main window
def roll_credits(self):
    self.main_win.erase()
    x, y = self.print_text(4,2,"Credits ", curses.A_BOLD)
    for key in CREDITS:
        y += 2
        x = 25
        self.print_text(x,y,key, curses.A_BOLD)
    self.write_main_artifact(PAVO)
[ "def print_credits(self):\n # Get header messages\n message = '\\n' + self.prefix + plugin_strings[\n 'Credits'\n ].get_string() + '\\n' + '=' * 61 + '\\n\\n'\n\n # Loop through all groups in the credits\n for group in gungame_credits:\n\n # Add the current group's name\n message += '\\t' + group + ':\\n'\n\n # Loop through all names in the current group\n for name, values in gungame_credits[group].items():\n\n # Add the current name\n message += '\\t\\t' + name + ' ' * (\n 20 - len(name)\n ) + values['username'] + '\\n'\n\n # Add 1 blank line between groups\n message += '\\n'\n\n # Print the message\n self.logger.log_message(message + '=' * 61 + '\\n\\n')", "def menu_credits(self):\n\n self.next = c.CREDITS_MENU\n self.background = prepare_game.GFX['menu_bg']", "def drawscreen():\n\n win = GraphWin('Target Practice', 500, 500)\n win. setCoords(0.0, 0.0, 10.0, 10.0)\n\n drawintro(win)\n drawtarget(Point(5, 5), 50, win)\n drawkey(win, Point(1.5, 8.5))\n currentscore = 0\n textscore = Text(Point(8, 1), \"Current Score: {0}\".format(0))\n textscore.draw(win)\n for i in range(5):\n p = drawpoint(win)\n textscore.undraw()\n currentscore += calcscore(p, Point(5, 5))\n textscore = Text(Point(8, 1), \"Current Score: {0}\".format(currentscore))\n textscore.draw(win)\n textscore.undraw()\n finalscore = Text(Point(5, 1), \"Your final score was: {0}\".format(currentscore) +\n \"\\nClick to Exit\")\n finalscore.draw(win)\n win.getMouse()\n win.close()", "def display_text(self):\n\n print(\"\\n\" * 100)\n print(\"Help MacGyver (M) to escape !\\n\")\n print(\"Controls:\\n\")\n print(\" Z\")\n print(\"Q S D\\n\")\n print(\"Pick up all the items (I) and reach the Guardian (G).\")\n print(\"If you try to escape without all the items, you will lose!\\n\")\n print(f\"Inventory: {str(self.game.player.inventory)}/3 items\\n\")", "def updateScreenAccountInfo():\n shares.calculateTotalInvestAccount()\n print(\"Hello\", menus.loggedUser[0][0] +\n \"! Welcome to your online shares trading account.\\n\")\n\n print(\"\\nShare Prices:\")\n print(\"Share 1 price per share: \".ljust(\n 25, ' '), \"£\", shares.share1rounded)\n print(\"Share 2 price per share: \".ljust(\n 25, ' '), \"£\", shares.share2rounded)\n print(\"Share 3 price per share: \".ljust(\n 25, ' '), \"£\", shares.share3rounded)\n print(\"\\nYour Assets:\")\n print(\"Cash Account Value: \".ljust(25, ' '), \"£\",\n format(cashAccount.cashAccount, \".2f\"))\n print(\"Investing Account Value: \".ljust(25, ' '), \"£\",\n format(shares.investAccount, \".2f\"))\n print(\"\\n\")", "def print_intro():\n globals.clear_screen()\n print(\n \"THE DESTROYER'S DESTINY\\n\"\n \"\\tCSC 11300 Projects 1 & 2\\n\"\n \"\\tBy: Vishnu Nair\\n\\n\"\n \"(C) 2015 Vishnu Nair. All rights reserved.\\n\"\n )", "def display_add_scr(stdscr, wallet: Wallet):\n c = 0 # last character read\n option = 0\n \n while c != ESCAPE and c != ENTER:\n add_menu_header(stdscr)\n display_options_bar(stdscr, SUB_MENU_START[Y], SUB_MENU_START[X],\n [\"Add addresses to watch\", \"Add balance manually\"], option, 'vertical')\n c, option = read_option(stdscr, option, 2, 'vertical')\n \n if c == ESCAPE:\n return\n \n last_line = SUB_MENU_START[Y] # the last line we wrote to\n try:\n curses.echo() # so the user sees what he types\n curses.curs_set(1)\n \n add_menu_header(stdscr)\n stdscr.addstr(last_line + 2, SUB_MENU_START[X],\n \"Enter coin code/symbol (e.g. 
BTC): \")\n last_line += 2\n coin_code = stdscr.getstr().decode(\"utf-8\").upper()\n \n if option == 0:\n stdscr.addstr(last_line + 2, SUB_MENU_START[X],\n \"Enter addresses to watch (comma separated, e.g. addr1,addr2,addr3):\")\n last_line += 2\n stdscr.move(last_line + 1, SUB_MENU_START[X])\n last_line += 1\n addresses = read_address_from_user(stdscr)\n wallet.add_addresses(coin_code, addresses)\n else:\n # manually add balance\n stdscr.addstr(last_line + 2, SUB_MENU_START[X], \"Enter amount to add: \")\n last_line += 2\n amount = float(stdscr.getstr().decode(\"utf-8\"))\n wallet.add_manual_balance(coin_code, amount)\n \n curses.curs_set(0)\n curses.noecho()\n except Exception:\n curses.curs_set(0)\n curses.noecho()\n return None", "def refresh_display(self):\n print('---'*20)\n print(\"Champion \" + self.name)\n print('Health: {0} Stamina: {1}'.format(round(self.current_health,2),\n round(self.current_stamina,2)))", "def produce_display(self):\n call('clear' if os.name == 'posix' else 'cls')\n curreny = self.vending_machine.get_currency()\n input_amount = self.vending_machine.get_input_amount()\n change_coins_state = self.vending_machine.get_current_change_status()\n print('---------------------------------------------')\n print(f'[Input amount]\\t\\t{input_amount} {curreny}')\n change_coin_text = '[Change]\\t'\n not_first = False\n for change_coins in change_coins_state.items():\n if not_first:\n change_coin_text += '\\t'\n not_first = True\n change_coin_text += f'\\t{str(change_coins[0])} {curreny} \\t {change_coins[1]}\\n'\n print(change_coin_text)\n return_gate_text = '[Return gate]\\t'\n # import ipdb; ipdb.set_trace()\n return_coins = self.vending_machine.get_change_coins_dict()\n\n if return_coins:\n return_coins_list = list(return_coins.keys())\n return_coins_list.sort()\n for return_coin in return_coins_list:\n for _ in range(0, return_coins[return_coin]):\n return_gate_text += f'\\t\\t{return_coin} {curreny}\\n'\n else:\n return_gate_text += 'Empty\\n'\n print(return_gate_text)\n\n items_for_sale_text = '[Items for sale]'\n product_details_list = self.vending_machine.get_product_details_list()\n not_first = False\n for product in product_details_list:\n if not_first:\n items_for_sale_text += '\\t\\t'\n not_first = True\n items_for_sale_text += f'\\t {product[\"id\"]}. {product[\"name\"]} \\t ' \\\n f'{product[\"price\"]} {curreny} \\t {product[\"status\"]} \\n'\n print(items_for_sale_text)\n outlet_text = f'[Outlet]'\n items_in_outlet = self.vending_machine.get_items_in_outlet_list()\n not_first = False\n for product_id in items_in_outlet:\n if not_first:\n outlet_text += '\\t'\n not_first = True\n outlet_text += f'\\t {self.vending_machine.get_product_details(product_id)[\"name\"]} \\n'\n print(outlet_text)\n if self.error_msg:\n print(f'Error : {self.error_msg}')\n self.error_msg = None\n print('---------------------------------------------')", "def updateAndDisplay( self ):\n self.copyK.updateSolveDic( self.solveDic )\n self.copyK.updatePossDic( self.possDic )\n self.displayK.drawKenKen( self.copyK )\n raw_input( \"Press [Enter] to continue\" )", "def print_secret_msg():\n PRINTER.wake()\n PRINTER.println(textwrap.fill(\"You found a golden ticket! 
\\n\\n Bring this to the circulation desk to claim your prize!\", width=32))\n PRINTER.feed(5)\n PRINTER.sleep()", "def drawContinue():\n msg = \"Press any to continue...\"\n w = len(str(msg)) + 2\n l = 3\n y = 40\n x = 20\n win = curses.newwin(l, w, y, x)\n win.addstr(1,1,msg)\n win.box()\n\n pan = curses.panel.new_panel(win)\n curses.panel.update_panels()\n win.noutrefresh();curses.doupdate()\n STDSCR.getch()\n pan.hide()\n return pan", "def _end_curses(self):\n logger.debug(\"[TestNotFound] end curses\")\n curses.nocbreak()\n self.window.keypad(0)\n curses.echo()\n curses.endwin()", "def status(text):\n if SHOW_UI:\n pygame.display.set_caption(text)\n stdout.write('\\r%s' % text)\n stdout.flush()", "def instructions(self):\n os.system('clear')\n print('\\n')\n print('{:^80}'.format('-----------Tic Tac Toe-----------'), end='\\n\\n')\n print('{:^80}'.format('Squares are numbered 1-9 starting'))\n print('{:^80}'.format('with the top left corner.'))", "def draw(self, key):\n # Screen size\n height, width = self.stdscr.getmaxyx()\n # Position information\n posx = 2\n start_pos = 2\n self.stdscr.addstr(start_pos, posx, \"jetson_clocks controller\", curses.A_BOLD)\n if self.jetson.userid == 0:\n # button start/stop jetson clocks\n box_keyboard(self.stdscr, start_pos - 1, posx + 1, \"a\", key)\n # Read status jetson_clocks\n start = self.jetson.jetson_clocks.start\n status = self.jetson.jetson_clocks.status\n box_status(self.stdscr, start_pos + 4, posx + 1, status.capitalize(), start)\n if self.jetson.userid == 0:\n # button start/stop jetson clocks\n box_keyboard(self.stdscr, start_pos - 1, posx + 4, \"e\", key)\n # Read status jetson_clocks\n enabled = self.jetson.jetson_clocks.enable\n enabled_box = \"Enable\" if enabled else \"Disable\"\n box_status(self.stdscr, start_pos + 4, posx + 4, enabled_box, enabled)\n # Build NVP model list\n nvpmodel = self.jetson.nvpmodel\n if nvpmodel is not None:\n self.stdscr.addstr(start_pos + 8, posx, \"NVP model\", curses.A_BOLD)\n if self.jetson.userid == 0:\n # Draw keys to decrease nvpmodel\n box_keyboard(self.stdscr, start_pos + 10, posx + 7, \"-\", key)\n # Draw selected number\n self.stdscr.addstr(start_pos + 8, posx + 16, str(nvpmodel.selected), curses.A_NORMAL)\n # Draw keys to increase nvpmodel\n box_keyboard(self.stdscr, start_pos + 18, posx + 7, \"+\", key)\n # Write list of available modes\n mode_names = [mode[\"Name\"] for mode in nvpmodel.modes]\n mode_status = [mode[\"status\"] for mode in nvpmodel.modes]\n box_list(self.stdscr, start_pos - 1, posx + 10, mode_names, nvpmodel.num, status=mode_status, max_width=42, numbers=True)\n # Add plot fan status\n if 'FAN' in self.jetson.stats:\n fan = self.jetson.stats['FAN']\n # Add label\n if 'cpwm' in fan:\n label = \"{current: >3}% of {target: >3}%\".format(current=fan.get(\"cpwm\", 0), target=fan.get(\"tpwm\", 0))\n else:\n label = \"Target: {target: >3}%\".format(target=fan.get(\"tpwm\", 0))\n # Evaluate size chart\n size_x = [posx + 40, width - 10]\n size_y = [2, height - 3]\n # Draw the GPU chart\n self.chart_fan.draw(self.stdscr, size_x, size_y, label=label)", "def compose_display():\r\n print(\"### Compose a composition ###\\n\"\r\n \"Here you can choose a file with composing instruction.\\n\"\r\n \"Our function will compose it for you.\")", "def wrap_refresh():\n if not DEBUG:\n tty.refresh()\n else:\n for y in range(20):\n line = []\n for x in range(80):\n line.append(uchr(debug_curses_screen[y][x]['key']))\n print(''.join(line))\n print('{0}'.format(debug_curses_cursor))", "def 
display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
orthogonalize features with an ensemble of estimators using a precomputed set of ensemble weights (following Chernozhukov et al., 2017)
def _two_step_orthogonalization(
    nfolds: int,
    tsize: int,
    df_folds: list,
    fold_combinations: tuple,
    nuisance_estim: list,
    ensemble_weights: np.array,
    in_ensemble_weights=False,
) -> tuple:
    # initiate the list storage for orthogonalized features
    orthogonalized_target_and_treatment = []

    for cbn in fold_combinations:
        # determine what folds have what task in the current run of estimation
        linear_folds = cbn[: nfolds[0]]
        nuisance_folds = cbn[nfolds[0] :]

        # split samples into 2 parts: training the nuisance parameters and
        # estimating the parameters of interest on orthogonalized features
        df_train = np.vstack([df_folds[c] for c in nuisance_folds])
        df_params = np.vstack([df_folds[c] for c in linear_folds])

        # initialize fitted values of treatment regressors
        fitted_values = np.zeros([df_params.shape[0], tsize + 1, len(nuisance_estim)])
        estimators_linear = np.zeros([df_params.shape[0], tsize + 1])

        # fit each variable of interest seperately against the nuisance params
        # note that there are tsize treatment features + 1 target feature
        for t in range(tsize + 1):
            for which, estim in enumerate(nuisance_estim):
                # train the model using nuisance sample
                estim.fit(df_train[:, tsize + 1 :], df_train[:, t])

                # fit values using the linear sample
                fitted_values[:, t, which] = estim.predict(df_params[:, tsize + 1 :])

            if in_ensemble_weights:
                tX = fitted_values[:, t, :]
                ensemble_weights[:, t] = np.linalg.inv(tX.T.dot(tX)).dot(
                    tX.T.dot(df_params[:, t])
                )

            # use pre-computed weights to combine the nuisance estimators
            estimators_linear[:, t] = fitted_values[:, t, :].dot(ensemble_weights[:, t])

        # initialize orthogonalized features for each ensemble estimator
        orthogonal_features = df_params[:, : tsize + 1] - estimators_linear

        orthogonalized_target_and_treatment.append(orthogonal_features)

    # return stacked orthogonalized features; note that order
    # of observations needs to be preserved here
    return np.vstack(orthogonalized_target_and_treatment)
[ "def ensemble_weights_cv(\n X: np.array,\n y: np.array,\n nuisance_estimators: list,\n ensemble_estimator: object,\n nfolds=5,\n) -> np.array:\n # stack features together for consistent splitting in cross-validation\n df = np.hstack([y, X])\n\n # create sum(nfolds) combinations of folds so that each piece of data is\n # used the same amount of times throughout the estimation\n fold_combinations = [\n list(range(i, nfolds)) + list(range(0, i)) for i in range(nfolds)\n ]\n\n # determine fold size and fold the dataset (approximately) evenly\n sample_fold = int(np.floor(df.shape[0] / nfolds))\n df_folds = np.split(df, [sample_fold * i for i in range(1, nfolds)])\n\n # initiate final weights matrix\n final_weights = np.zeros([len(nuisance_estimators), y.shape[1]])\n\n for cbn in fold_combinations:\n # assign roles to folds in the current run\n ensemble_sample = df_folds[0]\n train_sample = np.vstack(df_folds[1:])\n\n # initiate the weights for each ensemble and feature in this run\n current_run_weights = np.zeros([len(nuisance_estimators), y.shape[1]])\n for t in range(y.shape[1]):\n # initiate fitted values array\n fitted_values = np.zeros(\n [ensemble_sample.shape[0], len(nuisance_estimators)]\n )\n\n for which, estimator in enumerate(nuisance_estimators):\n # train the nuisance parameter estimator\n estimator.fit(train_sample[:, y.shape[1] :], train_sample[:, t])\n\n # fit the values on the ensemble sample\n fitted_values[:, which] = estimator.predict(\n ensemble_sample[:, y.shape[1] :]\n )\n # estimate weights of fitted values against ensemble sample target\n ensemble_estimator.fit(fitted_values, ensemble_sample[:, t])\n\n # store the weights for the feature t of the current run\n current_run_weights[:, t] = ensemble_estimator.coefs_\n\n # update final weights with set of weights for each of the k features\n # estimated divided by the number of nfold cross-validation runs\n final_weights += current_run_weights / nfolds\n\n return final_weights", "def exp(args):\n\n ####################################################################################################################\n #t_0 = time.time()\n # load the parameters from the arguments \n [x_init, i, seed, diff, h, f] = args\n sys_dim = len(x_init)\n\n\n # number of ensemble members generated from the initial condition\n N_ens = 100\n\n # time at which we compute an analysis of the ensemble in continuous time\n tanl = .01\n\n # the number of analyses we produce of the forward ensemble\n nanl = 2000\n\n # fourier truncation\n p = 1\n \n # static parameters based on fourier truncation\n RHO = rho(p)\n ALPHA = alpha(p)\n\n # set the storage for the ensemble means\n t_mean = np.zeros([sys_dim, nanl])\n e_mean = np.zeros([sys_dim, nanl])\n r_mean = np.zeros([sys_dim, nanl])\n a_mean = np.zeros([sys_dim, nanl])\n\n # set the storage for the spread of ensembles\n t_spread = np.zeros([nanl])\n e_spread = np.zeros([nanl])\n r_spread = np.zeros([nanl])\n a_spread = np.zeros([nanl])\n \n # we copy the initial condition into N_ens copies to forward propagate\n X_t_ens = np.tile(x_init, (N_ens, 1))\n X_e_ens = np.tile(x_init, (N_ens, 1))\n X_r_ens = np.tile(x_init, (N_ens, 1))\n X_a_ens = np.tile(x_init, (N_ens, 1))\n\n # set random seed for the same ensemble noise processes\n np.random.seed(seed)\n\n # for each forward time when we analyze the ensemble\n for j in range(nanl):\n #looping over the ensemble member\n for k in range(N_ens):\n # integrate until the next sample time\n for l in range(int(tanl/h)):\n # generate the weiner process over 
the interval at a fine discretization\n xi = np.random.standard_normal([sys_dim, int(round(tanl / 0.001))])\n\n # then compute the brownian motion a the current step size, re-normalized to unit variance\n tmp = np.zeros([sys_dim, int(round(tanl / h))])\n for m in range(int(round(tanl / h ))):\n tmp[:, m] = np.sum(xi[:, m * int(h / 0.001) : (m + 1) * int(h / 0.001)], axis=1) / np.sqrt(h / 0.001)\n \n # reset xi to be the Brownian path as generated by the finer discretization, normalized to have each component\n # drawn from a normal of unit variance\n xi = tmp\n\n\n # recursivley integrating one step forward via second order taylor, EM and RK schemes\n # note that the same weiner process is utilized for each integration scheme\n X_t_ens[k, :] = ty_step_path(X_t_ens[k, :], np.squeeze(xi[:, l]), h, [ALPHA, RHO, p, f, diff])\n X_e_ens[k, :] = em_step_path(X_e_ens[k, :], np.squeeze(xi[:, l]), h, [f, diff])\n X_r_ens[k, :] = rk_step_path(X_r_ens[k, :], np.squeeze(xi[:, l]), h, [f, diff])\n X_a_ens[k, :] = l96_rk4_step(X_r_ens[k, :], h, f)\n \n # make a final perturbation by the same Brownian process all at the end instead, for the ad hoc method\n ipdb.set_trace()\n X_a_ens[k, :] = X_a_ens[k, :] + diff * np.sum(xi * h, axis=1)\n \n ### then produce statistics of the ensemble at the analysis time\n \n # the ensemble mean for each method\n t_mean[:, j] = np.mean(X_t_ens, axis=0)\n e_mean[:, j] = np.mean(X_e_ens, axis=0)\n r_mean[:, j] = np.mean(X_r_ens, axis=0)\n a_mean[:, j] = np.mean(X_a_ens, axis=0)\n\n\t# we compute the spread as in whitaker & louge 98 by the standard deviation of the mean square deviation of the ensemble\n t_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(t_mean[:, j]) - X_t_ens)**2, axis=1)))\n e_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(e_mean[:, j]) - X_e_ens)**2, axis=1)))\n r_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(r_mean[:, j]) - X_r_ens)**2, axis=1)))\n a_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(a_mean[:, j]) - X_a_ens)**2, axis=1)))\n\n data = {\n 'e_mean': e_mean, 'e_spread': e_spread, \n 'r_mean': r_mean, 'r_spread': r_spread, \n 't_mean': t_mean, 't_spread': t_spread, \n 'a_mean': a_mean, 'a_spread': a_spread \n }\n \n fname = './data/ensemble_stats/' \\\n 'ensemble_statistics_h_' + str(h).zfill(3) + '_sys_dim_' + str(sys_dim).zfill(2) + '_tanl_' + \\\n str(tanl).zfill(3) + '_diffusion_' + str(diff).zfill(3) + \\\n '_init_con_' + str(i).zfill(6) + '.txt'\n \n f = open(fname, 'wb')\n pickle.dump(data, f)\n f.close()\n #print(time.time() - t_0)\n return i", "def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = []\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = 
prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':", "def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,\n seeds, total_n_estimators, verbose):\n # Retrieve settings\n n_samples, n_features = X.shape\n max_features = ensemble._max_features\n max_samples = ensemble._max_samples\n bootstrap = ensemble.bootstrap\n bootstrap_features = ensemble.bootstrap_features\n support_sample_weight = has_fit_parameter(ensemble.base_estimator_,\n \"sample_weight\")\n if not support_sample_weight and sample_weight is not None:\n raise ValueError(\"The base estimator doesn't support sample weight\")\n\n # Build estimators\n estimators = []\n estimators_features = []\n\n for i in range(n_estimators):\n if verbose > 1:\n print(\"Building estimator %d of %d for this parallel run \"\n \"(total %d)...\" % (i + 1, n_estimators, total_n_estimators))\n\n random_state = np.random.RandomState(seeds[i])\n\n # Draw random feature, sample indices\n features, indices = _generate_bagging_indices(random_state,\n bootstrap_features,\n bootstrap, n_features,\n n_samples, max_features,\n max_samples)\n\n # Draw samples, using sample weights, and then fit\n if support_sample_weight:\n if sample_weight is None:\n curr_sample_weight = np.ones((n_samples,))\n else:\n curr_sample_weight = sample_weight.copy()\n\n if bootstrap:\n sample_counts = np.bincount(indices, minlength=n_samples)\n curr_sample_weight *= sample_counts\n else:\n not_indices_mask = ~indices_to_mask(indices, n_samples)\n curr_sample_weight[not_indices_mask] = 0\n\n estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)\n\n else:\n estimator = ensemble._make_estimator((X[indices])[:, features], append=False,\n random_state=random_state)\n print(features)\n estimator.fit((X[indices])[:, features], y[indices])\n\n estimators.append(estimator)\n estimators_features.append(features)\n\n return estimators, estimators_features", "def _three_step_orthogonalization(\n nfolds: int,\n tsize: int,\n df_folds: list,\n fold_combinations: tuple,\n nuisance_estim: list,\n ensemble_estim: list,\n) -> tuple:\n # initiate the list storage for orthogonalized features\n orthogonalized_target_and_treatment = []\n\n # routine is rerun nfold times so that each fold is used\n # in different tasks the same amount of times\n for cbn in fold_combinations:\n\n # determine what folds have what task in the current run of estimation\n linear_folds = cbn[: nfolds[0]]\n ensemble_folds = cbn[nfolds[0] : nfolds[0] + nfolds[1]]\n nuisance_folds = cbn[nfolds[0] + nfolds[1] :]\n\n # split samples into 3 parts: training the nuisance parameters;\n # determining ensemble weights; estimating the parameters of interest\n df_train = np.vstack([df_folds[c] for c in nuisance_folds])\n df_ensemble = np.vstack([df_folds[c] for c in ensemble_folds])\n df_params = 
np.vstack([df_folds[c] for c in linear_folds])\n\n # initialize fitted values for target and treatment features\n estimators_ensemble = np.zeros(\n [df_ensemble.shape[0], tsize + 1, len(nuisance_estim)]\n )\n estimators_linear_nuisance = np.zeros(\n [df_params.shape[0], tsize + 1, len(nuisance_estim)]\n )\n estimators_linear_ensemble = np.zeros(\n [df_params.shape[0], tsize + 1, len(ensemble_estim)]\n )\n\n # fit each variable of interest seperately against the nuisance params\n # and predict orthogonalized features using ensemble and linear samples\n for i in range(tsize + 1):\n for which, estim in enumerate(nuisance_estim):\n # train the model using the train sample only\n estim.fit(df_train[:, tsize + 1 :], df_train[:, i])\n\n # predict on both ensemble and linear params samples\n estimators_ensemble[:, i, which] = estim.predict(\n df_ensemble[:, tsize + 1 :]\n )\n estimators_linear_nuisance[:, i, which] = estim.predict(\n df_params[:, tsize + 1 :]\n )\n\n for which, estim in enumerate(ensemble_estim):\n # train ensemble using fitted values from previous step\n estim.fit(estimators_ensemble[:, i, :], df_ensemble[:, i])\n\n # and predict the features using fitted values on linear\n # parameters sample and trained weights on ensemble sample\n estimators_linear_ensemble[:, i, which] = estim.predict(\n estimators_linear_nuisance[:, i, :]\n )\n # average over the predictions of different ensemble methods used\n averaged_ensembles = np.mean(estimators_linear_ensemble, axis=2)\n\n # orthonalize the target and linear features against fitted values\n orthogonal_features = df_params[:, : tsize + 1] - averaged_ensembles\n\n # note that order of linear folds needs to be preserved here\n orthogonalized_target_and_treatment.append(orthogonal_features)\n\n # combine list of orthogonalized features into a single array\n return np.vstack(orthogonalized_target_and_treatment)", "def combine(all_ensembles):\n final_ensemble = copy(all_ensembles[0])\n final_ensemble.estimators_ = []\n\n for ensemble in all_ensembles:\n final_ensemble.estimators_ += ensemble.estimators_\n\n # Required in old versions of sklearn\n final_ensemble.n_estimators = len(final_ensemble.estimators_)\n\n return final_ensemble", "def _careful_parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,\n seeds, total_n_estimators, verbose):\n # Retrieve settings\n n_samples, n_features = X.shape\n max_features = ensemble._max_features\n max_samples = ensemble._max_samples\n bootstrap = ensemble.bootstrap\n bootstrap_features = ensemble.bootstrap_features\n support_sample_weight = has_fit_parameter(ensemble.base_estimator_, \"sample_weight\")\n if not support_sample_weight and sample_weight is not None:\n raise ValueError(\"The base estimator doesn't support sample weight\")\n\n # Build estimators\n estimators = []\n estimators_features = []\n\n for i in range(n_estimators):\n if verbose > 1:\n print(\"Building estimator %d of %d for this parallel run \"\n \"(total %d)...\" % (i + 1, n_estimators, total_n_estimators))\n\n random_state = np.random.RandomState(seeds[i])\n estimator = ensemble._make_estimator(append=False,\n random_state=random_state)\n \n ''' UPDATED SAMPLING SECTION '''\n # Draw random feature, sample indices\n features, indices = _generate_bagging_indices(\n random_state, bootstrap_features, bootstrap, n_features,\n n_samples, max_features, max_samples)\n \n while len(np.unique(y[indices])) < 2:\n # Resample until training set is not single-class\n features, indices = _generate_bagging_indices(\n random_state, 
bootstrap_features, bootstrap, n_features,\n n_samples, max_features, max_samples)\n \n # Don't use sample weights, to be compatible with LinearSVC\n estimator.fit((X[indices])[:, features], y[indices])\n\n ''' END OF MODIFIED SECTION '''\n estimators.append(estimator)\n estimators_features.append(features)\n\n return estimators, estimators_features", "def define(ensemble_model, k_neighbors, classes=2, freeze=False):\n #Neighbor input \n #shape is equal to the concat shape of the ensemble model\n if freeze:\n for x in ensemble_model.layers:\n x.trainable=False\n \n input_shape = (k_neighbors, classes)\n neighbor_inputs = tf.keras.layers.Input(shape=input_shape, name=\"neighbor_input\")\n \n neighbor_distances = tf.keras.layers.Input(shape=(k_neighbors), name=\"neighbor_distance_input\")\n \n #original featuers from target tree\n original_features = ensemble_model.get_layer(\"ensemble_learn\").output\n\n attention_features = tf.keras.layers.Attention(use_scale=True)([original_features, neighbor_inputs])\n \n ##Squueze 1st dim for addition with original features\n scaled_context = tf.keras.layers.GlobalAveragePooling1D()(attention_features)\n \n #Add as residual to original matrix normalized\n context_residual = WeightedSum(name=\"ensemble_add_bias\")([scaled_context,original_features]) \n context_residual = tf.keras.layers.Dense(classes)(context_residual)\n output = tf.keras.layers.Softmax(name=\"neighbor_softmax\")(context_residual)\n\n return ensemble_model.inputs, neighbor_inputs, neighbor_distances, output", "def testConcatenationWeights(self):\n\n ensemble = ENSEMBLEW + ENSEMBLEW\n assert_equal(ensemble.getCoordsets(arange(3)), ATOMS.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoordsets(arange(3,6)), ATOMS.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoords(), COORDS,\n 'concatenation failed')\n assert_equal(ensemble.getWeights(), ENSEMBLEW.getWeights(),\n 'concatenation failed')", "def testConcatenationNoweightsWeights(self):\n\n ensemble = ENSEMBLE + ENSEMBLEW\n assert_equal(ensemble.getCoordsets(arange(3)), ATOMS.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoordsets(arange(3,6)), ATOMS.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoords(), COORDS,\n 'concatenation failed')\n self.assertIsNone(ensemble.getWeights(), 'concatenation failed')", "def infer_ensemble(data, network_list, trial_per_sample):\n data_var_img = Variable(data[0][0].float().cuda())\n data_var_angle = Variable(data[1].float().cuda())\n networks_logits = []\n for net in network_list:\n trial_outputs = net(data_var_img, data_var_angle, trials=trial_per_sample).data\n networks_logits.append(trial_outputs)\n networks_logits = torch.stack(networks_logits, 1).squeeze_()\n probabilities = torch.sigmoid(networks_logits)\n pred_mean = torch.mean(probabilities)\n pred_std = torch.std(probabilities)\n return pred_mean, pred_std", "def train(self, features):", "def _fit_ensemble(self, y, X=None):\n fh = np.arange(len(y)) + 1\n estimator_predictions = np.column_stack(self._predict_forecasters(fh, X))\n y = np.array(y)\n\n self.ensemble_algorithm.update(estimator_predictions.T, y)", "def generate_ensemble_model_and_batch(\n self,\n ):\n batch_x = tf.constant([[1, 1], [0, 0]], dtype=tf.float32)\n models = []\n for k in range(2):\n model = tf.keras.layers.Dense(2, activation='softmax')\n model(batch_x)\n if k == 0:\n weights = np.array([[1, 3], [0, 0]], dtype=np.float32)\n else:\n weights = np.array([[2, 1], [0, 1]], 
dtype=np.float32)\n bias = np.zeros(2, dtype=np.float32)\n model.set_weights([weights, bias])\n models.append(model)\n return models, batch_x", "def train(self):\n for ens_mem in self.ensemble_members:\n ens_mem.train()", "def normalise_features(features, normaliser=None):\n stacked_features = np.vstack(features).astype(np.float64)\n if normaliser == None: \n normaliser = StandardScaler().fit(stacked_features)\n normed_features = normaliser.transform(stacked_features)\n \n return normed_features, normaliser", "def generate_ensemble_simple_mlp_and_batch(\n self,\n ):\n batch_x = tf.constant([[1, 1], [0, 0]], dtype=tf.float32)\n models = []\n for _ in range(2):\n model = model_util.get_simple_mlp(input_shape=(2,), num_classes=2)\n model(batch_x)\n models.append(model)\n return models, batch_x", "def construct_ensemble(T, train_partition):\n #initalizes a list to hold the decision stumps later\n ensemble_d = [None]*T\n for i in range(T):\n # gets a partition with data (random+ w/ replacement) and F(random + without replacement)\n random_partition = util.bootstrap_partition(train_partition)\n #passes the newly generated partition into DecisionStump class\n one_d = DecisionStump(random_partition)\n #adds the current decision stump to the ensemble list\n ensemble_d[i] = one_d\n return ensemble_d", "def _expected_without_replacement(weights, attention, features):\n # Reshape the passed weights and attention in feature compatible sahpes\n axes = [-1] * (K.ndim(features) - 2)\n wf = expand_many(weights, axes)\n af = expand_many(attention, axes)\n\n # Compute how much of the probablity mass was available for each sample\n pm = 1 - K.tf.cumsum(attention, axis=1, exclusive=True)\n pmf = expand_many(pm, axes)\n\n # Compute the features\n Fa = af * features\n Fpm = pmf * features\n Fa_cumsum = K.tf.cumsum(Fa, axis=1, exclusive=True)\n F_estimator = Fa_cumsum + Fpm\n\n F = K.sum(wf * F_estimator, axis=1)\n\n # Compute the gradient\n def gradient(grad):\n N = K.shape(attention)[1]\n probs = attention / pm\n probsf = expand_many(probs, axes)\n grad = K.expand_dims(grad, 1)\n\n # Gradient wrt to the attention\n ga1 = F_estimator / probsf\n ga2 = (\n K.tf.cumsum(features, axis=1, exclusive=True) -\n expand_many(to_float32(K.tf.range(N)), [0]+axes) * features\n )\n ga = grad * (ga1 + ga2)\n ga = K.sum(ga, axis=list(range(2, K.ndim(ga))))\n ga = ga * weights\n\n # Gradient wrt to the features\n gf = expand_many(to_float32(K.tf.range(N-1, -1, -1)), [0]+axes)\n gf = pmf + gf * af\n gf = wf * gf\n gf = gf * grad\n\n return [None, ga, gf]\n\n return F, gradient" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
orthogonalize features with an average over ensembles of estimators, which are computed using an extra fold (hence 3 steps); this is a procedure similar to DML in Chernozhukov et al. (2017), but with an extra step in the middle instead of cross-validation prior to estimation
def _three_step_orthogonalization(
    nfolds: int,
    tsize: int,
    df_folds: list,
    fold_combinations: tuple,
    nuisance_estim: list,
    ensemble_estim: list,
) -> tuple:
    # initiate the list storage for orthogonalized features
    orthogonalized_target_and_treatment = []

    # routine is rerun nfold times so that each fold is used
    # in different tasks the same amount of times
    for cbn in fold_combinations:

        # determine what folds have what task in the current run of estimation
        linear_folds = cbn[: nfolds[0]]
        ensemble_folds = cbn[nfolds[0] : nfolds[0] + nfolds[1]]
        nuisance_folds = cbn[nfolds[0] + nfolds[1] :]

        # split samples into 3 parts: training the nuisance parameters;
        # determining ensemble weights; estimating the parameters of interest
        df_train = np.vstack([df_folds[c] for c in nuisance_folds])
        df_ensemble = np.vstack([df_folds[c] for c in ensemble_folds])
        df_params = np.vstack([df_folds[c] for c in linear_folds])

        # initialize fitted values for target and treatment features
        estimators_ensemble = np.zeros(
            [df_ensemble.shape[0], tsize + 1, len(nuisance_estim)]
        )
        estimators_linear_nuisance = np.zeros(
            [df_params.shape[0], tsize + 1, len(nuisance_estim)]
        )
        estimators_linear_ensemble = np.zeros(
            [df_params.shape[0], tsize + 1, len(ensemble_estim)]
        )

        # fit each variable of interest separately against the nuisance params
        # and predict orthogonalized features using ensemble and linear samples
        for i in range(tsize + 1):
            for which, estim in enumerate(nuisance_estim):
                # train the model using the train sample only
                estim.fit(df_train[:, tsize + 1 :], df_train[:, i])

                # predict on both ensemble and linear params samples
                estimators_ensemble[:, i, which] = estim.predict(
                    df_ensemble[:, tsize + 1 :]
                )
                estimators_linear_nuisance[:, i, which] = estim.predict(
                    df_params[:, tsize + 1 :]
                )

            for which, estim in enumerate(ensemble_estim):
                # train ensemble using fitted values from previous step
                estim.fit(estimators_ensemble[:, i, :], df_ensemble[:, i])

                # and predict the features using fitted values on linear
                # parameters sample and trained weights on ensemble sample
                estimators_linear_ensemble[:, i, which] = estim.predict(
                    estimators_linear_nuisance[:, i, :]
                )
        # average over the predictions of different ensemble methods used
        averaged_ensembles = np.mean(estimators_linear_ensemble, axis=2)

        # orthogonalize the target and linear features against fitted values
        orthogonal_features = df_params[:, : tsize + 1] - averaged_ensembles

        # note that order of linear folds needs to be preserved here
        orthogonalized_target_and_treatment.append(orthogonal_features)

    # combine list of orthogonalized features into a single array
    return np.vstack(orthogonalized_target_and_treatment)
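A minimal, hypothetical usage sketch of the routine above (not part of the original module): the simulated data, the [target | treatments | controls] column layout, the (1, 2, 2) split of fold roles, and the scikit-learn estimator choices are all illustrative assumptions; only the call signature comes from the code itself.

    import numpy as np
    from sklearn.linear_model import LassoCV, LinearRegression
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.default_rng(0)
    n, tsize, n_controls, total_folds = 500, 1, 10, 5

    y = rng.normal(size=(n, 1))            # target
    d = rng.normal(size=(n, tsize))        # treatment(s)
    x = rng.normal(size=(n, n_controls))   # nuisance controls
    df = np.hstack([y, d, x])              # column layout assumed by the routine

    # split evenly into folds and rotate the roles of the folds
    fold_size = n // total_folds
    df_folds = np.split(df, [fold_size * i for i in range(1, total_folds)])
    fold_combinations = [
        list(range(i, total_folds)) + list(range(0, i)) for i in range(total_folds)
    ]

    # 1 fold for the linear step, 2 for the ensemble weights, 2 for the nuisance fit
    nfolds = (1, 2, 2)
    nuisance_estim = [LassoCV(), RandomForestRegressor(n_estimators=100)]
    ensemble_estim = [LinearRegression()]

    ortho = _three_step_orthogonalization(
        nfolds, tsize, df_folds, fold_combinations, nuisance_estim, ensemble_estim
    )
    # ortho[:, 0] is the orthogonalized target, ortho[:, 1:] the orthogonalized treatments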
[ "def _two_step_orthogonalization(\n nfolds: int,\n tsize: int,\n df_folds: list,\n fold_combinations: tuple,\n nuisance_estim: list,\n ensemble_weights: np.array,\n in_ensemble_weights=False,\n) -> tuple:\n # initiate the list storage for orthogonalized features\n orthogonalized_target_and_treatment = []\n\n for cbn in fold_combinations:\n # determine what folds have what task in the current run of estimation\n linear_folds = cbn[: nfolds[0]]\n nuisance_folds = cbn[nfolds[0] :]\n\n # split samples into 2 parts: training the nuisance parameters and\n # estimating the parameters of interest on orthogonalized features\n df_train = np.vstack([df_folds[c] for c in nuisance_folds])\n df_params = np.vstack([df_folds[c] for c in linear_folds])\n\n # initialize fitted values of treatment regressors\n fitted_values = np.zeros([df_params.shape[0], tsize + 1, len(nuisance_estim)])\n estimators_linear = np.zeros([df_params.shape[0], tsize + 1])\n\n # fit each variable of interest seperately against the nuisance params\n # note that there are tsize treatment features + 1 target feature\n for t in range(tsize + 1):\n for which, estim in enumerate(nuisance_estim):\n # train the model using nuisance sample\n estim.fit(df_train[:, tsize + 1 :], df_train[:, t])\n\n # fit values using the linear sample\n fitted_values[:, t, which] = estim.predict(df_params[:, tsize + 1 :])\n\n if in_ensemble_weights:\n tX = fitted_values[:, t, :]\n ensemble_weights[:, t] = np.linalg.inv(tX.T.dot(tX)).dot(\n tX.T.dot(df_params[:, t])\n )\n # use pre-computed weights to combine the nuisance estimators\n estimators_linear[:, t] = fitted_values[:, t, :].dot(ensemble_weights[:, t])\n\n # initialize orthogonalized features for each ensemble estimator\n orthogonal_features = df_params[:, : tsize + 1] - estimators_linear\n orthogonalized_target_and_treatment.append(orthogonal_features)\n\n # return stacked orthogonalized features; note that order\n # of observations needs to be preserved here\n return np.vstack(orthogonalized_target_and_treatment)", "def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = []\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = 
list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':", "def ensemble_weights_cv(\n X: np.array,\n y: np.array,\n nuisance_estimators: list,\n ensemble_estimator: object,\n nfolds=5,\n) -> np.array:\n # stack features together for consistent splitting in cross-validation\n df = np.hstack([y, X])\n\n # create sum(nfolds) combinations of folds so that each piece of data is\n # used the same amount of times throughout the estimation\n fold_combinations = [\n list(range(i, nfolds)) + list(range(0, i)) for i in range(nfolds)\n ]\n\n # determine fold size and fold the dataset (approximately) evenly\n sample_fold = int(np.floor(df.shape[0] / nfolds))\n df_folds = np.split(df, [sample_fold * i for i in range(1, nfolds)])\n\n # initiate final weights matrix\n final_weights = np.zeros([len(nuisance_estimators), y.shape[1]])\n\n for cbn in fold_combinations:\n # assign roles to folds in the current run\n ensemble_sample = df_folds[0]\n train_sample = np.vstack(df_folds[1:])\n\n # initiate the weights for each ensemble and feature in this run\n current_run_weights = np.zeros([len(nuisance_estimators), y.shape[1]])\n for t in range(y.shape[1]):\n # initiate fitted values array\n fitted_values = np.zeros(\n [ensemble_sample.shape[0], len(nuisance_estimators)]\n )\n\n for which, estimator in enumerate(nuisance_estimators):\n # train the nuisance parameter estimator\n estimator.fit(train_sample[:, y.shape[1] :], train_sample[:, t])\n\n # fit the values on the ensemble sample\n fitted_values[:, which] = estimator.predict(\n ensemble_sample[:, y.shape[1] :]\n )\n # estimate weights of fitted values against ensemble sample target\n ensemble_estimator.fit(fitted_values, ensemble_sample[:, t])\n\n # store the weights for the feature t of the current run\n current_run_weights[:, t] = ensemble_estimator.coefs_\n\n # update final weights with set of weights for each of the k features\n # estimated divided by the number of nfold cross-validation runs\n final_weights += current_run_weights / nfolds\n\n return final_weights", "def exp(args):\n\n ####################################################################################################################\n #t_0 = time.time()\n # load the parameters from the arguments \n [x_init, i, seed, diff, h, f] = args\n sys_dim = len(x_init)\n\n\n # number of ensemble members generated from the initial condition\n N_ens = 100\n\n # time at which we compute an analysis of the ensemble in continuous time\n tanl = .01\n\n # the number of analyses we produce of the forward ensemble\n nanl = 2000\n\n # fourier truncation\n p = 1\n \n # static parameters based on fourier truncation\n RHO = rho(p)\n ALPHA = alpha(p)\n\n # set the storage for the ensemble means\n t_mean = np.zeros([sys_dim, nanl])\n e_mean = np.zeros([sys_dim, nanl])\n r_mean = np.zeros([sys_dim, nanl])\n a_mean = np.zeros([sys_dim, nanl])\n\n # set the storage for the spread of ensembles\n t_spread = np.zeros([nanl])\n e_spread = np.zeros([nanl])\n r_spread = np.zeros([nanl])\n a_spread = np.zeros([nanl])\n \n # we copy the initial condition into N_ens copies to forward propagate\n X_t_ens = np.tile(x_init, (N_ens, 1))\n X_e_ens = np.tile(x_init, (N_ens, 1))\n X_r_ens = np.tile(x_init, (N_ens, 1))\n X_a_ens = np.tile(x_init, (N_ens, 1))\n\n # set random seed for the same ensemble noise processes\n np.random.seed(seed)\n\n # for each forward time when we analyze the ensemble\n for j in 
range(nanl):\n #looping over the ensemble member\n for k in range(N_ens):\n # integrate until the next sample time\n for l in range(int(tanl/h)):\n # generate the weiner process over the interval at a fine discretization\n xi = np.random.standard_normal([sys_dim, int(round(tanl / 0.001))])\n\n # then compute the brownian motion a the current step size, re-normalized to unit variance\n tmp = np.zeros([sys_dim, int(round(tanl / h))])\n for m in range(int(round(tanl / h ))):\n tmp[:, m] = np.sum(xi[:, m * int(h / 0.001) : (m + 1) * int(h / 0.001)], axis=1) / np.sqrt(h / 0.001)\n \n # reset xi to be the Brownian path as generated by the finer discretization, normalized to have each component\n # drawn from a normal of unit variance\n xi = tmp\n\n\n # recursivley integrating one step forward via second order taylor, EM and RK schemes\n # note that the same weiner process is utilized for each integration scheme\n X_t_ens[k, :] = ty_step_path(X_t_ens[k, :], np.squeeze(xi[:, l]), h, [ALPHA, RHO, p, f, diff])\n X_e_ens[k, :] = em_step_path(X_e_ens[k, :], np.squeeze(xi[:, l]), h, [f, diff])\n X_r_ens[k, :] = rk_step_path(X_r_ens[k, :], np.squeeze(xi[:, l]), h, [f, diff])\n X_a_ens[k, :] = l96_rk4_step(X_r_ens[k, :], h, f)\n \n # make a final perturbation by the same Brownian process all at the end instead, for the ad hoc method\n ipdb.set_trace()\n X_a_ens[k, :] = X_a_ens[k, :] + diff * np.sum(xi * h, axis=1)\n \n ### then produce statistics of the ensemble at the analysis time\n \n # the ensemble mean for each method\n t_mean[:, j] = np.mean(X_t_ens, axis=0)\n e_mean[:, j] = np.mean(X_e_ens, axis=0)\n r_mean[:, j] = np.mean(X_r_ens, axis=0)\n a_mean[:, j] = np.mean(X_a_ens, axis=0)\n\n\t# we compute the spread as in whitaker & louge 98 by the standard deviation of the mean square deviation of the ensemble\n t_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(t_mean[:, j]) - X_t_ens)**2, axis=1)))\n e_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(e_mean[:, j]) - X_e_ens)**2, axis=1)))\n r_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(r_mean[:, j]) - X_r_ens)**2, axis=1)))\n a_spread[j] = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (np.squeeze(a_mean[:, j]) - X_a_ens)**2, axis=1)))\n\n data = {\n 'e_mean': e_mean, 'e_spread': e_spread, \n 'r_mean': r_mean, 'r_spread': r_spread, \n 't_mean': t_mean, 't_spread': t_spread, \n 'a_mean': a_mean, 'a_spread': a_spread \n }\n \n fname = './data/ensemble_stats/' \\\n 'ensemble_statistics_h_' + str(h).zfill(3) + '_sys_dim_' + str(sys_dim).zfill(2) + '_tanl_' + \\\n str(tanl).zfill(3) + '_diffusion_' + str(diff).zfill(3) + \\\n '_init_con_' + str(i).zfill(6) + '.txt'\n \n f = open(fname, 'wb')\n pickle.dump(data, f)\n f.close()\n #print(time.time() - t_0)\n return i", "def vm_impl_reduce_mean(self):\n\n def vm_impl(x, axis):\n x = x.asnumpy()\n out = vm.mean(x, axis)\n return Tensor(out)\n\n return vm_impl", "def ensemble_averaging(self, setObservations, setStates, \n weighting_factor=\"unit\", maxiter=1000, impr=1):\n N = self.N\n W = 0\n hmmk = self.__class__(self.omega_X, self.omega_O)\n A_bar = zeros( (N, N))\n B_bar = zeros( (self.M, N))\n pi_bar = zeros(N)\n for k, obs in enumerate(setObservations):\n hmmk.A = self.A\n hmmk.B = self.B\n hmmk.pi = self.pi\n obsIndices = self._get_observationIndices(obs)\n state = setStates[k]\n hmmk._baum_welch(obsIndices, state, maxiter, impr)\n if weighting_factor == \"Pall\":\n Wk = hmmk._weighting_factor_Pall(setObservations)\n elif 
weighting_factor == \"Pk\":\n Wk = hmmk._weighting_factor_Pk(obs)\n else:\n Wk = 1\n A_bar = A_bar + Wk * hmmk.A\n B_bar = B_bar + Wk * hmmk.B\n pi_bar = pi_bar + Wk * hmmk.pi\n W = W + Wk\n if W == 0:\n W = 1\n print \"The ensemble averaging method did not converge\" \n else:\n self.A = A_bar / W\n self.B = B_bar / W\n self.pi = pi_bar / W\n self._mask()", "def compute_single_fidelity_and_approximate_control_variate_mean_estimates(\n target_cost, nsample_ratios, estimator,\n model_ensemble, seed):\n random_state = np.random.RandomState(seed)\n estimator.set_random_state(random_state)\n samples, values = estimator.generate_data(model_ensemble)\n # compute mean using only hf daa\n hf_mean = values[0][1].mean()\n # compute ACV mean\n acv_mean = estimator(values)\n return hf_mean, acv_mean", "def train_neural_mean(model, ws, x_train, y_train, z_train, loss_func, optimizer, epochs: int, batch_size: int):\n\n losses = np.zeros(epochs)\n train_accs = np.zeros(epochs)\n\n batch_count = len(x_train) // batch_size\n timer = TimeEstimator(epochs * batch_count)\n\n for epoch in range(epochs):\n model.train()\n\n shuffle = torch.randperm(len(x_train), device=DEVICE)\n\n epoch_loss = 0\n epoch_acc = 0\n\n prev_print_time = time.monotonic()\n\n for b in range(batch_count):\n batch_i = shuffle[b * batch_size: (b + 1) * batch_size]\n x_train_batch = x_train[batch_i]\n y_train_batch = y_train[batch_i][:, 0]\n z_train_batch = z_train[batch_i]\n predictions = model.forward(ws[x_train_batch])\n\n batch_loss = loss_func(predictions, y_train_batch)\n batch_loss = torch.sum(torch.mul(batch_loss, z_train_batch))\n\n batch_acc = accuracy(predictions, y_train_batch)\n\n epoch_loss += batch_loss / batch_count\n epoch_acc += batch_acc / batch_count\n\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n\n curr_time = time.monotonic()\n if curr_time - prev_print_time > 10:\n eta = timer.update(epoch * batch_size + b)\n print(f\" batch {b}/{batch_count}: loss {batch_loss.item():.4f} train acc {batch_acc:.4f} eta {eta}\")\n prev_print_time = curr_time\n\n print(f'Epoch {epoch}/{epochs}, loss {epoch_loss:.4f} acc {epoch_acc:.4f}')\n\n losses[epoch] = epoch_loss\n train_accs[epoch] = epoch_acc\n\n return losses, train_accs", "def add_arith_mean_cols(assay_results_df, input_dir):\n for metab in assay_results_df.index:\n resistant = assay_results_df.ix[metab, :6]\n sensitive = assay_results_df.ix[metab, 6:12]\n overall = assay_results_df.ix[metab, :12]\n\n for count, group in enumerate([resistant, sensitive, overall]):\n arith_mean = np.mean(group)\n arith_var = np.var(group)\n if count == 0:\n assay_results_df.ix[metab, 'resistant_amean'] = arith_mean\n assay_results_df.ix[metab, 'resistant_avar'] = arith_var\n if count == 1:\n assay_results_df.ix[metab, 'sensitive_amean'] = arith_mean\n assay_results_df.ix[metab, 'sensitive_avar'] = arith_var\n if count == 2:\n assay_results_df.ix[metab, 'overall_amean'] = arith_mean\n assay_results_df.ix[metab, 'overall_avar'] = arith_var\n\n assay_results_df.to_csv(input_dir + 'assay_results_extended.tsv',\n sep='\\t',\n na_rep='NaN')\n\n return assay_results_df", "def mapAndNormalizeFeatures(self):\n self.X_mapped = self.featureMap(self.X[self.shuffleIdx])\n \n # define splits for training, cross-validation, and test sets, with 60/20/20 split\n div1 = numpy.floor(self.m*0.6)\n div2 = numpy.floor(self.m*0.8)\n \n # normalize the features in the training set\n self.mean = numpy.mean(self.X_mapped[0:div1],0)\n self.stdev = numpy.std(self.X_mapped[0:div1],0)\n self.X_mapped 
= self.normalize(self.X_mapped) #(self.X-self.mean)/self.stdev\n \n self.X_train = self.X_mapped[0:div1]\n self.y_train = self.y[0:div1]\n self.X_cv = self.X_mapped[div1:div2]\n self.y_cv = self.y[div1:div2]\n self.X_test = self.X_mapped[div2:]\n self.y_test = self.y[div2:]", "def getAverageFeatureValues(self):\n averages = zeros(len(self.featureSet))\n for fvect in self.featureVectors: \n for i in range(len(self.featureSet)):\n averages[i] += fvect[i]\n \n for i in range(len(self.featureSet)):\n averages[i] /= len(self.featureVectors)\n\n return averages", "def train_one_fold(self):\n raise NotImplementedError(\"Can't call this method\")", "def evaluate(idxs):\n for key, result_analysis in analysis_dct.items():\n if \"avg-\" in key:\n new_idxs = list(set([i[0:-2] for i in idxs]))\n else:\n new_idxs = idxs\n # \n df_X = result_analysis.trinary.df_X\n ser_y = result_analysis.trinary.ser_y\n states = list(set(ser_y.values))\n #\n ensemble = classifier_ensemble.ClassifierEnsemble(\n filter_high_rank=100, size=100)\n ensemble.fit(df_X, ser_y)\n df_X_test = df_X.loc[new_idxs, :]\n ser_y_test = ser_y.loc[new_idxs]\n df_predict = ensemble.predict(df_X_test)\n df_predict[\"true state\"] = ser_y_test\n # Construct the predictions\n predictions = []\n for clf in ensemble.clfs:\n df_X_test_sub = df_X[ensemble.columns]\n dct = {i: [] for i in states}\n for idx in new_idxs:\n {i: dct[i].append(clf.coef_[i].dot(df_X_test_sub.loc[idx, :]))\n for i in states}\n df_result = pd.DataFrame(dct, index=new_idxs)\n predictions.append(df_result)\n result_analysis.df_predict = df_predict\n result_analysis.predictions = predictions\n result_analysis.ensemble = ensemble", "def ensemble_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n train_eq_ensemble_pred = equally_ensemble_results(train_probas)\n val_eq_ensemble_pred = equally_ensemble_results(val_probas)\n test_eq_ensemble_pred = equally_ensemble_results(test_probas)\n\n print(\"Equally weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_eq_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_eq_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_eq_ensemble_pred))\n\n np.save(os.path.join('model', 'train_eq_ensemble_pred'), train_eq_ensemble_pred)\n np.save(os.path.join('model', 'val_eq_ensemble_pred'), val_eq_ensemble_pred)\n np.save(os.path.join('model', 'test_eq_ensemble_pred'), test_eq_ensemble_pred)\n\n confidence_train = calculate_confidence_val(train_probas, y_train)\n confidence_val = calculate_confidence_val(val_probas, y_val)\n confidence_test = calculate_confidence_val(test_probas, y_test)\n\n train_w_ensemble_pred = weighted_ensemble_results(train_probas, confidence_train)\n val_w_ensemble_pred = weighted_ensemble_results(val_probas, confidence_val)\n test_w_ensemble_pred = weighted_ensemble_results(test_probas, confidence_test)\n\n print(\"Weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_w_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_w_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_w_ensemble_pred))\n\n np.save(os.path.join('model', 'train_w_ensemble_pred.npy'), train_w_ensemble_pred)\n np.save(os.path.join('model', 'val_w_ensemble_pred.npy'), val_w_ensemble_pred)\n np.save(os.path.join('model', 'test_w_ensemble_pred.npy'), test_w_ensemble_pred)", "def 
infer_ensemble(data, network_list, trial_per_sample):\n data_var_img = Variable(data[0][0].float().cuda())\n data_var_angle = Variable(data[1].float().cuda())\n networks_logits = []\n for net in network_list:\n trial_outputs = net(data_var_img, data_var_angle, trials=trial_per_sample).data\n networks_logits.append(trial_outputs)\n networks_logits = torch.stack(networks_logits, 1).squeeze_()\n probabilities = torch.sigmoid(networks_logits)\n pred_mean = torch.mean(probabilities)\n pred_std = torch.std(probabilities)\n return pred_mean, pred_std", "def combine(all_ensembles):\n final_ensemble = copy(all_ensembles[0])\n final_ensemble.estimators_ = []\n\n for ensemble in all_ensembles:\n final_ensemble.estimators_ += ensemble.estimators_\n\n # Required in old versions of sklearn\n final_ensemble.n_estimators = len(final_ensemble.estimators_)\n\n return final_ensemble", "def adabelief(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n max_exp_avg_sqs: List[Tensor],\n state_steps: List[int],\n amsgrad: bool,\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n) -> None:\n\n for i, param in enumerate(params):\n\n grad = grads[i]\n exp_avg = exp_avgs[i]\n exp_avg_sq = exp_avg_sqs[i]\n step = state_steps[i]\n if amsgrad:\n max_exp_avg_sq = max_exp_avg_sqs[i]\n\n bias_correction1 = 1 - beta1**step\n bias_correction2 = 1 - beta2**step\n\n if weight_decay != 0:\n grad = grad.add(param, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n grad_residual = grad - exp_avg\n exp_avg_sq.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.maximum(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n else:\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n step_size = lr / bias_correction1\n\n param.addcdiv_(exp_avg, denom, value=-step_size)", "def _calculateAggregateFeatures(self) -> None:\n return", "def update_average_model(self, model):\n for model_param, average_param in zip(model.parameters(), self.average_model.parameters()):\n # EWMA average model update\n average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
helper function to pre-estimate ensemble weights for k features in the Double Machine Learning algorithm using nfolds cross-validation
def ensemble_weights_cv(
    X: np.array,
    y: np.array,
    nuisance_estimators: list,
    ensemble_estimator: object,
    nfolds=5,
) -> np.array:
    # stack features together for consistent splitting in cross-validation
    df = np.hstack([y, X])

    # create sum(nfolds) combinations of folds so that each piece of data is
    # used the same amount of times throughout the estimation
    fold_combinations = [
        list(range(i, nfolds)) + list(range(0, i)) for i in range(nfolds)
    ]

    # determine fold size and fold the dataset (approximately) evenly
    sample_fold = int(np.floor(df.shape[0] / nfolds))
    df_folds = np.split(df, [sample_fold * i for i in range(1, nfolds)])

    # initiate final weights matrix
    final_weights = np.zeros([len(nuisance_estimators), y.shape[1]])

    for cbn in fold_combinations:
        # assign roles to folds in the current run
        ensemble_sample = df_folds[0]
        train_sample = np.vstack(df_folds[1:])

        # initiate the weights for each ensemble and feature in this run
        current_run_weights = np.zeros([len(nuisance_estimators), y.shape[1]])
        for t in range(y.shape[1]):
            # initiate fitted values array
            fitted_values = np.zeros(
                [ensemble_sample.shape[0], len(nuisance_estimators)]
            )

            for which, estimator in enumerate(nuisance_estimators):
                # train the nuisance parameter estimator
                estimator.fit(train_sample[:, y.shape[1] :], train_sample[:, t])

                # fit the values on the ensemble sample
                fitted_values[:, which] = estimator.predict(
                    ensemble_sample[:, y.shape[1] :]
                )
            # estimate weights of fitted values against ensemble sample target
            ensemble_estimator.fit(fitted_values, ensemble_sample[:, t])

            # store the weights for the feature t of the current run
            current_run_weights[:, t] = ensemble_estimator.coefs_

        # update final weights with set of weights for each of the k features
        # estimated divided by the number of nfold cross-validation runs
        final_weights += current_run_weights / nfolds

    return final_weights
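A hypothetical usage sketch for the function above. The function accesses a `coefs_` attribute on the ensemble estimator after fitting, so the small least-squares wrapper below is an illustrative stand-in for whatever weighting model is actually used; the data and the nuisance estimator choices are likewise assumptions, not part of the original module.

    import numpy as np
    from sklearn.linear_model import LassoCV
    from sklearn.ensemble import RandomForestRegressor

    class LeastSquaresWeights:
        """Minimal stand-in that stores stacking weights in `coefs_` after fit."""
        def fit(self, fitted_values, target):
            # one column of fitted values per nuisance estimator
            self.coefs_, *_ = np.linalg.lstsq(fitted_values, target, rcond=None)
            return self

    rng = np.random.default_rng(0)
    n = 500
    X = rng.normal(size=(n, 10))   # nuisance controls
    y = rng.normal(size=(n, 2))    # target and one treatment feature

    weights = ensemble_weights_cv(
        X,
        y,
        nuisance_estimators=[LassoCV(), RandomForestRegressor(n_estimators=100)],
        ensemble_estimator=LeastSquaresWeights(),
        nfolds=5,
    )
    # weights has shape (n_nuisance_estimators, y.shape[1])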
[ "def kfolds_making(dataset,label,K=10):\n #data = dataset-1\n #label = dataset-1\n dataset, label = shuffle(dataset, label, random_state=0)\n print(label)\n kfold_DT = KFold(K)\n\n performances_DT = 0\n performances_NN = 0\n performances_SVM = 0\n performances_KNN = 0\n #print()\n\n i=0\n for trn_idx, tst_idx in kfold_DT.split(dataset):\n print(i)\n i = i+1\n performances_DT = performances_DT + decision_tree2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_NN = performances_NN + nn2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_SVM = performances_SVM + svm2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_KNN = performances_KNN + knn(dataset,label.ravel(),trn_idx,tst_idx)\n\n \n performances_DT = performances_DT/K\n performances_NN = performances_NN/K\n performances_SVM = performances_SVM/K\n performances_KNN = performances_KNN/K\n\n print(round(performances_DT, 3), \"accuracy for decision tree\")\n print(round(performances_NN, 3), \"accuracy for multi layer perceptron\")\n print(round(performances_SVM, 3), \"accuracy for support vector classifier \")\n print(round(performances_KNN, 3), \"accuracy for K nearest neighbors \")", "def train_k_fold(self):\n all_errors = []\n for current_k in range(self.k_folds):\n self.set_data(*self.evaluator.get_fold(current_k, self.fold_test_indices))\n self.hyperparameters['fold'] = current_k\n self.train_one_fold()\n all_errors.append(self.get_evaluation_report())\n return numpy.mean(all_errors, axis=0)", "def kfold_CV(model, X, y, k=4):\n ...\n return np.array(accuracies)", "def test_can_do_k_fold_cv(self):\n plumber = SKPlumber(\"classification\", 1, evaluator=make_kfold_evaluator(3))\n X, y = load_dataset(\"iris\")\n # Should be able to do k-fold cross validation.\n plumber.fit(X, y)", "def cross_validation(dataset, k=10):\n fold_errV = 0\n n = len(dataset.examples)\n examples = dataset.examples\n for fold in range(k):\n random.shuffle(dataset.examples)\n train_data, val_data = train_test_split(dataset, fold * (n / k), (fold + 1) * (n / k))\n dataset.examples = train_data\n h = DecisionTreeLearner(dataset)\n fold_errV += err_ratio(h, dataset, val_data)\n\n # Reverting back to original once test is completed\n dataset.examples = examples\n return fold_errV/k", "def check_reweighting_by_ML_gb(original, target, original_weights, target_weights=None, n_iterations=1):\n if target_weights is None:\n target_weights = numpy.ones(target.shape[0])\n \n aucs = []\n \n data = numpy.concatenate([original, target])\n labels = numpy.array([0] * original.shape[0] + [1] * target.shape[0])\n W = numpy.concatenate([original_weights, target_weights])\n \n for _ in range(n_iterations):\n Xtr, Xts, Ytr, Yts, Wtr, Wts = train_test_split(data, labels, W, train_size=0.51)\n \n original_w = Wtr[Ytr == 0] \n target_w = Wtr[Ytr == 1] \n\n original_w /= numpy.sum(original_w)\n target_w /= numpy.sum(target_w)\n\n original_tr = Xtr[Ytr == 0]\n target_tr = Xtr[Ytr == 1]\n \n # put target events with negative weights into original samples with -weights\n data_neg = target_tr[target_w < 0]\n weights_neg = -target_w[target_w < 0]\n original_tr = numpy.concatenate((original_tr, data_neg))\n original_w = numpy.concatenate((original_w, weights_neg))\n target_tr = target_tr[target_w >= 0]\n target_w = target_w[target_w >= 0]\n \n Xtr = numpy.concatenate([original_tr, target_tr])\n Ytr = numpy.array([0] * original_tr.shape[0] + [1] * target_tr.shape[0])\n Wtr = numpy.concatenate([original_w, target_w])\n \n clf = GradientBoostingClassifier(n_estimators=200, 
subsample=0.5, \n min_samples_leaf=100, learning_rate=0.1, \n max_depth=6).fit(Xtr, Ytr, sample_weight=Wtr)\n proba = clf.predict_proba(Xts)[:, 1]\n aucs.append(roc_auc_score(Yts, proba, sample_weight=Wts))\n \n fpr, tpr, _ = roc_curve(Yts, proba, sample_weight=Wts)\n return fpr, tpr, numpy.mean(aucs), numpy.std(aucs)", "def kFoldCrossValidation(data, k=5):\r\n totalObservations = len(data)\r\n classifierSet = {observation.classifier for observation in data} # the set of all observations with their classifier\r\n stratifyByClass = {}\r\n for classifier in classifierSet:\r\n stratifyByClass[classifier] = [observation for observation in data if observation.classifier == classifier] # stratify the data using a dictionary to hold each class name\r\n for key, observations in stratifyByClass.items():\r\n stratifyByClass[key] = {\"count\": len(observations), \"observations\": observations}\r\n probabilities = {classifier: stratifiedObservations[\"count\"] / totalObservations for classifier, stratifiedObservations in stratifyByClass.items()}\r\n crossFolds = []\r\n for i in range(k):\r\n crossFold = []\r\n for classifier, probability in probabilities.items():\r\n crossFoldObservations = []\r\n while len(crossFoldObservations) / (totalObservations / k) < probability:\r\n if len(stratifyByClass[classifier][\"observations\"]) == 0:\r\n break\r\n crossFoldObservations.append(\r\n stratifyByClass[classifier][\"observations\"].pop(\r\n random.randint(\r\n 0,\r\n len(stratifyByClass[classifier][\"observations\"]) - 1\r\n )\r\n )\r\n )\r\n crossFold.extend(crossFoldObservations)\r\n crossFolds.append(crossFold)\r\n return crossFolds", "def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,\n bw_func, sel_func, multi_bw_min, multi_bw_max, bws_same_times,\n verbose=False):\n if init is None:\n bw = sel_func(bw_func(y, X))\n optim_model = gwr_func(y, X, bw)\n else:\n bw = init\n optim_model = gwr_func(y, X, init)\n bw_gwr = bw\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params\n\n XB = np.multiply(param, X)\n if rss_score:\n rss = np.sum((err)**2)\n iters = 0\n scores = []\n delta = 1e6\n BWs = []\n bw_stable_counter = 0\n bws = np.empty(k)\n gwr_sel_hist = []\n\n try:\n from tqdm.auto import tqdm #if they have it, let users have a progress bar\n except ImportError:\n\n def tqdm(x, desc=''): #otherwise, just passthrough the range\n return x\n\n for iters in tqdm(range(1, max_iter + 1), desc='Backfitting'):\n new_XB = np.zeros_like(X)\n params = np.zeros_like(X)\n\n for j in range(k):\n temp_y = XB[:, j].reshape((-1, 1))\n temp_y = temp_y + err\n temp_X = X[:, j].reshape((-1, 1))\n bw_class = bw_func(temp_y, temp_X)\n\n if bw_stable_counter >= bws_same_times:\n #If in backfitting, all bws not changing in bws_same_times (default 5) iterations\n bw = bws[j]\n else:\n bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j])\n gwr_sel_hist.append(deepcopy(bw_class.sel_hist))\n\n optim_model = gwr_func(temp_y, temp_X, bw)\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params.reshape((-1, ))\n new_XB[:, j] = optim_model.predy.reshape(-1)\n params[:, j] = param\n bws[j] = bw\n \n #If bws remain the same as from previous iteration\n if (iters > 1) and np.all(BWs[-1] == bws):\n bw_stable_counter += 1\n else:\n bw_stable_counter = 0\n \n num = np.sum((new_XB - XB)**2) / n\n den = np.sum(np.sum(new_XB, axis=1)**2)\n score = (num / den)**0.5\n XB = new_XB\n\n if rss_score:\n predy = np.sum(np.multiply(params, X), axis=1).reshape((-1, 1))\n 
new_rss = np.sum((y - predy)**2)\n score = np.abs((new_rss - rss) / new_rss)\n rss = new_rss\n scores.append(deepcopy(score))\n delta = score\n BWs.append(deepcopy(bws))\n\n if verbose:\n print(\"Current iteration:\", iters, \",SOC:\", np.round(score, 7))\n print(\"Bandwidths:\", ', '.join([str(bw) for bw in bws]))\n\n if delta < tol:\n break\n\n opt_bws = BWs[-1]\n return (opt_bws, np.array(BWs), np.array(scores), params, err, gwr_sel_hist, bw_gwr)", "def k_fold_xval_stratified(self, k=3, autoskip=False):\r\n session, metadata = db.get_session_or_error()\r\n\r\n # Will hold the score of each folds.\r\n scores = {}\r\n\r\n # Get a list of all the photo IDs in the database.\r\n samples = db.get_photos_with_taxa(session, metadata)\r\n\r\n # Get a list of the photo IDs and a list of the classes. The classes\r\n # are needed for the stratified cross validation.\r\n photo_ids = []\r\n classes = []\r\n for x in samples:\r\n photo_ids.append(x[0].id)\r\n tmp = np.array(x[1:]).astype(str)\r\n classes.append('_'.join(tmp))\r\n\r\n # Numpy features are needed for these.\r\n photo_ids = np.array(photo_ids)\r\n classes = np.array(classes)\r\n\r\n # Count the number of each class.\r\n class_counts = Counter(classes)\r\n\r\n if autoskip:\r\n # Create a mask for the classes that have enough members and remove\r\n # the photo IDs that don't have enough members.\r\n mask = []\r\n for i, c in enumerate(classes):\r\n if class_counts[c] >= k:\r\n mask.append(i)\r\n\r\n photo_ids = photo_ids[mask]\r\n classes = classes[mask]\r\n else:\r\n for label, count in class_counts.items():\r\n assert count >= k, \"Class {0} has only {1} members, which \" \\\r\n \"is too few. The minimum number of labels for any \" \\\r\n \"class cannot be less than k={2}. Use --autoskip to skip \" \\\r\n \"classes with too few members.\".format(label, count, k)\r\n\r\n if autoskip:\r\n photo_count_min = k\r\n else:\r\n photo_count_min = 0\r\n\r\n # Train data exporter.\r\n train_data = BatchMakeTrainData(self.config, self.cache_dir)\r\n train_data.set_photo_count_min(photo_count_min)\r\n\r\n # Set the trainer.\r\n trainer = BatchMakeAnn(self.config)\r\n trainer.set_photo_count_min(photo_count_min)\r\n if self.aivolver_config_path:\r\n trainer.set_training_method('aivolver', self.aivolver_config_path)\r\n\r\n # Set the ANN tester.\r\n tester = TestAnn(self.config)\r\n tester.set_photo_count_min(photo_count_min)\r\n\r\n # Obtain cross validation folds.\r\n folds = cross_validation.StratifiedKFold(classes, k)\r\n result_dir = os.path.join(self.temp_dir, 'results')\r\n for i, (train_idx, test_idx) in enumerate(folds):\r\n # Make data directories.\r\n train_dir = os.path.join(self.temp_dir, 'train', str(i))\r\n test_dir = os.path.join(self.temp_dir, 'test', str(i))\r\n ann_dir = os.path.join(self.temp_dir, 'ann', str(i))\r\n test_result = os.path.join(result_dir, '{0}.tsv'.format(i))\r\n\r\n for path in (train_dir,test_dir,ann_dir,result_dir):\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n\r\n # Make train data for this fold.\r\n train_samples = photo_ids[train_idx]\r\n train_data.set_subset(train_samples)\r\n train_data.batch_export(train_dir)\r\n\r\n # Make test data for this fold.\r\n test_samples = photo_ids[test_idx]\r\n train_data.set_subset(test_samples)\r\n train_data.batch_export(test_dir, train_dir)\r\n\r\n # Train neural networks on training data.\r\n trainer.batch_train(data_dir=train_dir, output_dir=ann_dir)\r\n\r\n # Calculate the score for this fold.\r\n tester.test_with_hierarchy(test_dir, ann_dir)\r\n 
tester.export_hierarchy_results(test_result)\r\n\r\n # List all level combinations.\r\n try:\r\n class_hr = self.config.classification.hierarchy\r\n hr = [level.name for level in class_hr]\r\n except:\r\n raise ConfigurationError(\"classification hierarchy not set\")\r\n level_filters = []\r\n ranks = []\r\n for i in range(len(hr)):\r\n ranks.append(hr[i])\r\n level_filters.append(ranks)\r\n level_filters = tuple(level_filters)\r\n\r\n for filter_ in level_filters:\r\n correct, total = tester.get_correct_count(filter_)\r\n score = float(correct) / total\r\n\r\n filter_s = \"/\".join(filter_)\r\n if filter_s not in scores:\r\n scores[filter_s] = []\r\n scores[filter_s].append(score)\r\n\r\n return scores", "def cross_validation(Ps, data, algo, kfolds=5, **kwargs):\n scores_tr = np.zeros((kfolds, len(Ps)))\n scores_te = np.zeros((kfolds, len(Ps)))\n X_tr, y_tr, X_te, y_te, _ = data\n X_train_ = pd.concat((X_tr, X_te)).reset_index(drop=True).sample(frac=1)\n y_train_ = pd.concat((y_tr, y_te)).reset_index(drop=True).iloc[X_train_.index]\n X_train_, y_train_ = X_train_.reset_index(drop=True), y_train_.reset_index(drop=True)\n n = X_train_.shape[0]\n p = int(n // kfolds)\n for k in tqdm(range(kfolds)):\n print('Fold {}'.format(k+1))\n q = p * (k + 1) + n % kfolds if k == kfolds - 1 else p * (k + 1)\n idx_val = np.arange(p * k, q)\n idx_train = np.setdiff1d(np.arange(n), idx_val)\n X_train, y_train = X_train_.iloc[idx_train, :], y_train_.iloc[idx_train, :]\n X_val, y_val = X_train_.iloc[idx_val, :], y_train_.iloc[idx_val, :]\n s_tr, s_te = [], []\n for P in Ps:\n if algo == 'CSVM':\n alg = C_SVM(C=P, print_callbacks=False, **kwargs)\n elif algo == 'KLR':\n alg = KLR(lbda=P, **kwargs)\n elif algo == 'KRR':\n alg = KRR(lbda=P, **kwargs)\n else:\n NotImplementedError('Please choose between \"CSVM\", \"KRR\" or \"KLR\"')\n alg.fit(X_train, y_train)\n pred_tr = alg.predict(X_train)\n score_tr = alg.score(pred_tr, y_train)\n pred_te = alg.predict(X_val)\n score_te = alg.score(pred_te, y_val)\n s_tr.append(score_tr)\n s_te.append(score_te)\n print('Constant={}, train_acc={:0.4f}, val_acc={:0.4f}'.format(P, score_tr, score_te))\n scores_tr[k], scores_te[k] = s_tr, s_te\n mean_scores_tr, mean_scores_te = np.mean(scores_tr, axis=0), np.mean(scores_te, axis=0)\n p_opt = Ps[np.argmax(mean_scores_te)]\n print('Best constant={}, val_acc={:0.4f}'.format(p_opt, np.max(mean_scores_te)))\n return p_opt, scores_tr, scores_te, mean_scores_tr, mean_scores_te", "def k_fold_split(dataset, k):\n # TODO\n # can be done by using sklearn, but only with already tensorized dataset\n pass", "def kfold(data, labels, n_folds, train_method, pred_method, classify_method, labels_formatting, metric, target_folds, verbose=True, **kwargs):\n try:\n assert n_folds > 1\n except AssertionError:\n print('Need more than one fold')\n\n try:\n assert len(data) == len(labels)\n except AssertionError:\n print('Error: Data and labels have different lengths') \n \n if verbose: print('Engaging n-fold cross validation with {0:d} folds on {1:d} items'.format(n_folds, len(data))) \n fold_size = int(len(data)/n_folds)\n # Random permuation of the data\n perm = np.random.permutation(len(data))\n data = data[perm]\n labels = labels[perm]\n\n res = []\n for fold in range(n_folds):\n if target_folds is not None and fold not in target_folds:\n res.append(np.nan)\n continue\n val_idx = range(fold*fold_size,(fold+1)*fold_size)\n val_data = np.array(data[val_idx])\n val_labels = np.array(labels[val_idx])\n\n train_data = np.array([element for i, element in 
enumerate(data) if i not in val_idx])\n train_labels = np.array([element for i, element in enumerate(labels) if i not in val_idx])\n\n train_method(train_data, train_labels, **kwargs)\n\n preds = pred_method(val_data, **kwargs)\n \n if metric.quantized:\n preds = classify_method(preds)\n res.append(metric.measure(np.ravel(preds), labels_formatting(val_labels)))\n if verbose: print('Fold {0:d}, {1:s}: {2:.2f}'.format(fold,metric.name,res[fold]))\n\n if verbose: print('Done! Average {0:s} is {1:.2f}'.format(metric.name,np.nanmean(res)))\n\n return np.nanmean(res)", "def mRmR(X, Y, clf, n):\n\n\n candidate_feature_indices = np.arange(X.shape[-1])\n feature_sets = []\n \n # Phase 1: Create Sequential Feature Sets [S1, S2, S3, ... Sn] #\n for i in range(n):\n print('Computing Feature Set S%s' % (i + 1)) \n relevance = MID(X[:,candidate_feature_indices], Y)\n redundancy = np.zeros(len(relevance))\n\n try:\n for k in feature_sets[i - 1]:\n redundancy += MIC(X[:, candidate_feature_indices], X[:, k])\n redundancy /= len(redundancy)\n except:\n pass # feature_sets -> Empty list\n\n score = relevance - redundancy\n best_feature_index = np.argmax(score)\n if feature_sets:\n feature_sets.append(feature_sets[-1] + \n [candidate_feature_indices[best_feature_index]])\n else:\n feature_sets.append([candidate_feature_indices[best_feature_index]])\n\n candidate_feature_indices = np.delete(candidate_feature_indices, \n best_feature_index)\n \n # Phase 2: Validate Feature Set Performance #\n feature_set_scores = []\n for feature_set in feature_sets:\n kf = KFold(n_splits = 5)\n avg_accuracy = 0\n for train_index, test_index in kf.split(X, Y):\n clf.fit(X[train_index][:, feature_set],Y[train_index])\n avg_accuracy += clf.score(X[test_index][:, feature_set], Y[test_index])\n feature_set_scores.append(avg_accuracy / 5)\n\n\n # Phase 3: Find Best Possible Subspace, For The Best Calculated Feature Space Sk #\n best_feature_subset = feature_sets[np.argmax(feature_set_scores)]\n best_subset_score = np.max(feature_set_scores)\n found_better_subset = True\n\n while found_better_subset and len(best_feature_subset) > 1:\n feature_subsets = [best_feature_subset[:k] + best_feature_subset[k + 1:] \n for k in range(len(best_feature_subset))]\n feature_subset_scores = []\n\n for feature_set in feature_subsets:\n kf = KFold(n_splits = 5)\n avg_accuracy = 0\n for train_index, test_index in kf.split(X, Y):\n clf.fit(X[train_index][:, feature_set],Y[train_index])\n avg_accuracy += clf.score(X[test_index][:, feature_set], Y[test_index])\n feature_subset_scores.append(avg_accuracy / 5)\n \n if np.any(feature_subset_scores > best_subset_score):\n best_subset_score = np.max(feature_subset_scores)\n best_feature_subset = feature_subsets[np.argmax(feature_subset_scores)]\n else:\n found_better_subset = False\n\n return best_feature_subset", "def evaluate_features():\n # training set is from Stanford Sentiment Training Set\n training_set = parse_stanford(\"data/stanfordSentimentTreebank/stanfordSentimentTreebank/dictionary.txt\", \n \"data/stanfordSentimentTreebank/stanfordSentimentTreebank/sentiment_labels.txt\")\n # train weights for maxent model\n weights = train_maxent(training_set)\n # sort weights in descending order\n sorted_weights = { sentiment: sorted(weights[sentiment].iteritems(), \n key=lambda x:x[1], \n reverse=True) \n for sentiment in weights}\n\n # evaluate model for the top i weights, in this range (There should be # ~130000 weights total)\n for i in range(10000, 130000, 10000):\n # get the top i weights\n new_weights 
= {\"positive\": {}, \"negative\": {}, \"neutral\": {}}\n for sentiment in sorted_weights:\n new_weights[sentiment] = {w[0]:weights[sentiment][w[0]] \n for w in sorted_weights[sentiment][:i-1]}\n\n # load the episode that has gold standard features already assigned\n episode = parse_goldstandard(\"data/s1e9_gold.txt\", 1, 9)\n # calculate bag of words sentiments\n word_sentiments = parse_NRC(\"data/NRC-Emotion-Lexicon-v0.92/NRC-Emotion-Lexicon-v0.92/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt\")\n bag_of_words(episode, word_sentiments)\n # calculate maxent sentiments\n run_maxent(episode, new_weights)\n\n # evaulate maxent and bag_of_words sentiments against baseline\n print \"%s max_ent vs gold: %s\" % (i, compare_scores(episode, \n score1=\"maxent_score\", \n score2=\"gold_score\"))\n print \"%s bow vs gold: %s\" % (i, compare_scores(episode, \n \"bow_score\", \n score2=\"gold_score\"))", "def evaluate_model(window_len, folds_X_train, folds_Y_train, folds_X_test, folds_Y_test, folds_price_test,\n folds_curr_target_prep_train, folds_curr_target_prep_test, features_names, model_class, model_args,y_col):\n print \"evaluate model\"\n print model_class.__name__\n future_ys = folds_Y_train[0].columns.tolist()\n evaluations = []\n evaluations_values = []\n if window_len > 0:\n features = [(features_name, wl) for wl in range(window_len) for features_name in features_names]\n else:\n features = features_names\n # iterate folds\n for f in range(len(folds_X_train)):\n print 'fold' + str(f)\n X_train = folds_X_train[f][features]\n X_test = folds_X_test[f][features]\n\n # X_train['ind'] = range(len(X_train))\n # X_train = X_train.set_index(['ind'], append=True)\n # X_test_index = X_test.index\n # X_test = X_test.set_index(range(len(X_train)),append=True)\n # folds_Y_train_index_f = folds_Y_train[f].index\n # folds_Y_train[f] = folds_Y_train[f].set_index(range(len(X_train)),append=True)\n # folds_curr_target_prep_train_index_f = folds_curr_target_prep_train[f].index\n # folds_curr_target_prep_train[f] = folds_curr_target_prep_train[f].set_index(range(len(X_train)),append=True)\n # folds_Y_test_index_f = folds_Y_test[f].index\n # folds_Y_test[f] = folds_Y_test[f].set_index(range(len(X_train)),append=True)\n # folds_curr_target_prep_test_index_f = folds_curr_target_prep_test[f].index\n # folds_curr_target_prep_test[f] = folds_curr_target_prep_test[f].set_index(range(len(X_train)),append=True)\n # folds_price_test_index_f = folds_price_test[f].index\n # folds_price_test[f] = folds_price_test[f].set_index(range(len(X_train)),append=True)\n # iterate future value to predict\n for t in future_ys:\n print 'next t' + str(t)\n\n y_train = folds_Y_train[f].loc[X_train.index][t]\n X_train_curr_price_prep = folds_curr_target_prep_train[f].loc[X_train.index]\n\n y_test = folds_Y_test[f][t].loc[X_test.index]\n X_test_curr_price_prep = folds_curr_target_prep_test[f].loc[y_test.index].tolist()\n price_test = folds_price_test[f].loc[y_test.index]\n\n model = model_class(**model_args)\n\n if isinstance(model, RegressorMixin):\n model.fit(X_train, y_train)\n y_preds_val = model.predict(X_test)\n if (y_col == 'Close_norm'):\n y_preds_binary = np.sign(y_preds_val - X_test_curr_price_prep)\n y_preds_binary = [1 if x == 0 else x for x in y_preds_binary]\n elif (y_col == 'Close_proc'):\n y_preds_binary = np.sign(y_preds_val)\n\n else:\n if (y_col == 'Close_norm'):\n y_train_binary = np.sign(y_train - X_train_curr_price_prep)\n y_train_binary = [1 if x == 0 else x for x in y_train_binary]\n elif (y_col == 
'Close_proc'):\n y_train_binary = np.sign(y_train)\n\n model.fit(X_train, y_train_binary)\n y_preds_binary = model.predict(X_test)\n\n fold_eval = {}\n fold_eval[\"fold\"] = f\n fold_eval[\"model\"] = model_class.__name__\n fold_eval[\"next_t\"] = t\n\n eval_values = pd.DataFrame()\n eval_values['curr_price'] = price_test\n eval_values['preds'] = y_preds_binary\n\n if (y_col == 'Close_norm'):\n y_test_binary = np.sign(y_test - X_test_curr_price_prep)\n y_test_binary = [1 if x == 0 else x for x in y_test_binary]\n elif (y_col == 'Close_proc'):\n y_test_binary = np.sign(y_test)\n\n eval_values['y'] = y_test_binary\n # eval_values['curr_price2'] = folds_price_test[f][t].values\n for k1, v in fold_eval.items():\n eval_values[k1] = v\n\n evals = dict(fold_eval)\n evals['accuracy_score'] = accuracy_score(y_test_binary, y_preds_binary)\n evals['f1_score'] = f1_score(y_test_binary, y_preds_binary, average='macro')\n evals['precision_score'] = precision_score(y_test_binary, y_preds_binary, average='macro')\n\n if not isinstance(model, RegressorMixin):\n # y_proba = model.predict_proba(X_test)\n try:\n evals['roc_auc_score'] = roc_auc_score(y_test_binary, y_preds_binary, average='macro')\n except:\n evals['roc_auc_score'] = -1\n else:\n evals['roc_auc_score'] = 0\n\n evals[\"long_short_profit\"], eval_values[\"long_short_profit\"] = long_short_profit_evaluation(\n price_test.tolist(), y_preds_binary)\n evals[\"sharp_ratio\"] = np.mean(eval_values[\"long_short_profit\"]) / (\n np.std(eval_values[\"long_short_profit\"]) + 0.0001)\n\n evaluations.append(evals)\n evaluations_values.append(eval_values)\n\n return pd.DataFrame(evaluations), pd.concat(evaluations_values)", "def xgboost_weight(data, feature_num, iter_num):\n\n vim = np.zeros((data.shape[1], feature_num)).tolist() # vim: weights of Regulatory network\n for i in range(0, data.shape[1]):\n print(\"----------------------------------------------------------------\", i,\n \"----------------------------------------------------------------\")\n\n # split train and test data set\n y = data[:, i]\n #print('the value of y is : ', y)\n if i == 0:\n x = data[:, 1:feature_num]\n elif i < feature_num:\n x = np.hstack((data[:, 0:i], data[:, i + 1:feature_num]))\n else:\n x = data[:, 0:feature_num]\n\n print('shape of x is : ', np.shape(x))\n\n # Build model\n params = {\n\n 'booster': 'gbtree',\n 'max_depth': 4,\n 'min_child_weight':4 ,\n 'lambda': 0,\n 'subsample': 0.7,\n 'colsample_bytree': 0.9,\n 'silent': 1,\n 'eta': 0.0008\n }\n\n dtrain = xgb.DMatrix(x, y)\n plst = params.items()\n model = xgb.train(plst, dtrain, iter_num)\n\n # Compute and sort feature importance\n importance = model.get_fscore()\n #importance = model.get_score(fmap='', importance_type='total_gain')\n importance = sorted(importance.items(), key=operator.itemgetter(1), reverse=True)\n print('size of importance is : ', np.shape(importance))\n\n # Convert the importance list to matrix weights\n for j in range(0, len(importance)):\n num = re.findall(r'\\d+', importance[j][0])\n num = np.array(num)\n num = np.core.defchararray.strip(num, '()')\n num = int(num)\n if i >= feature_num - 1:\n fea_num = num\n else:\n if num < i:\n fea_num = num\n else:\n fea_num = num + 1\n vim[i][fea_num] = importance[j][1]\n\n return vim", "def ib3(X_train, y_train):\n classes = np.unique(y_train)\n \n # Start with the first element.\n x_train_reduced = np.asarray([X_train[0,:]])\n y_train_reduced = np.asarray([y_train[0]])\n acceptable = np.array([0])\n \n lower = lambda p,z,n: (p + (z**2)/(2*n) - 
z*((p*(1-p)/n + (z**2)/(4*n**2)))**0.5)/(1 + (z**2)/n)\n upper = lambda p,z,n: (p + (z**2)/(2*n) + z*((p*(1-p)/n + (z**2)/(4*n**2)))**0.5)/(1 + (z**2)/n)\n \n for index, (x_instance, y_instance) in enumerate(zip(X_train, y_train)):\n\n best_knn = self.knn\n best_knn.fit(x_train_reduced, y_train_reduced)\n# print(x_train_reduced)\n y_pred_instance = best_knn.predict(np.asarray([x_instance]))\n\n # This part is similar to IB2\n if y_pred_instance != y_instance:\n x_train_reduced = np.vstack([x_train_reduced, x_instance])\n acceptable = np.hstack([acceptable, index])\n \n \n incorrect_class = 0\n correct_class = 0\n \n # Not going on onced got the expected value\n if len(acceptable) > len(y_train)/30: \n break\n \n # This part differ from IB2, just acceptable instance are kept.\n # Count the number of incorrect and correct classification\n for x_instance_reduced in x_train_reduced:\n best_knn = self.knn\n best_knn.fit(x_train_reduced, y_train_reduced)\n y_pred_instance_reduced = best_knn.predict(np.asarray([x_instance_reduced]))\n \n if y_pred_instance_reduced != y_instance:\n incorrect_class += 1\n else:\n correct_class += 1\n \n n = incorrect_class + correct_class\n p = correct_class / n\n \n # For acceptance\n z = 0.9\n lower_bound = lower(p, z, n)\n upper_bound = upper(p, z, n)\n# print(lower_bound, upper_bound, incorrect_class, correct_class)\n if (incorrect_class/n <= lower_bound) or (correct_class/n >= upper_bound):\n acceptable = np.hstack([acceptable, index])\n \n\n \n # For removing\n z = 0.7\n lower_bound = lower(p, z, n)\n upper_bound = upper(p, z, n)\n \n if (incorrect_class/n <= lower_bound) or (correct_class/n >= upper_bound):\n acceptable = np.delete(acceptable, [index], axis=0) \n\n# if p == 1:\n# break\n \n x_train_reduced = X_train[acceptable]\n y_train_reduced = y_train[acceptable]\n indexes_reduced = acceptable\n \n return x_train_reduced, y_train_reduced, indexes_reduced", "def fit_nested_cv(self,X, Y, n_cv_outer=5,n_cv_inner=5, verbose=1, continuous_folds=False):\n\n\n\n if np.ndim(X)==1:\n X = np.transpose(np.atleast_2d(X))\n\n if continuous_folds == True:\n raise NotImplementedError()\n else:\n\n # indices of outer test/train split for each fold\n # It is imperative that the random state be identical to the random state of the Kfold used\n # in ensemble_cv\n cv_kf = KFold(n_splits=n_cv_outer, shuffle=True, random_state=42)\n skf = cv_kf.split(X)\n\n i=1\n Y_hat=np.zeros((len(Y),n_cv_outer))\n pR2_cv = list()\n # In outer loop, we rotate the test set through the full dataset\n for idx_r, idx_t in skf:\n if verbose > 1:\n print( '...runnning outer cv-fold', i, 'of', n_cv_outer)\n\n Xr_o = X[idx_r, :] # train set input\n Yr_o = Y[idx_r] # train set output\n Xt_o = X[idx_t, :] # test set input\n Yt_o = Y[idx_t] # test set output (used for scoring ensemble only)\n\n\n cv_kf_in = KFold(n_splits=n_cv_inner, shuffle=True, random_state=42)\n skf_inner = cv_kf_in.split(Xr_o)\n\n j=1\n # In the inner loop, we perform CV to predict the full validation set Yr_o, which will be recorded\n # to be used for ensemble training. 
THEN we use the full Xr_o to predict values for Xt_o, which will\n # be used for ensemble evaluation.\n for idx_r_inner, idx_t_inner in skf_inner:\n\n j+=1\n Xr = Xr_o[idx_r_inner, :]\n Yr = Yr_o[idx_r_inner]\n Xt = Xr_o[idx_t_inner, :]\n Yt = Yr_o[idx_t_inner]\n # Predict a fold of the Yr_o (validation)\n self.fit(Xr, Yr, get_history_terms = False)\n Yt_hat = self.predict(Xt, get_history_terms = False)\n\n full_indices = idx_r[idx_t_inner] # indices of inner loop\n Y_hat[full_indices,i-1] = Yt_hat\n\n Yt_hat.reshape(Yt.shape)\n pR2 = self.poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))\n pR2_cv.append(pR2)\n\n if verbose > 1:\n print( 'pR2: ', pR2)\n\n # Now predict the ensemble's test set\n self.fit(Xr_o, Yr_o, get_history_terms = False)\n Yt_hat = self.predict(Xt_o, get_history_terms = False)\n\n Y_hat[idx_t,i-1] = Yt_hat\n pR2 = self.poisson_pseudoR2(Yt_o, Yt_hat, np.mean(Yr_o))\n pR2_cv.append(pR2)\n\n i+=1\n\n if verbose > 0:\n print(\"pR2_cv: %0.6f (+/- %0.6f)\" % (np.mean(pR2_cv),\n np.std(pR2_cv)/np.sqrt(n_cv_inner*n_cv_outer)))\n\n return Y_hat, pR2_cv", "def evaluate(X, Y, hyperparams):\n\n# from scikits.learn.cross_val import LeaveOneOut\n# loo = LeaveOneOut(len(Y))\n from scikits.learn.cross_val import KFold\n K = 5\n# print >> sys.stderr, \"Using 10-fold cross-validation\"\n loo = KFold(len(Y), K)\n# print loo\n\n all_y_test = []\n all_y_test_predict = []\n\n nlltotal = 0.\n for train, test in loo:\n trainidx = [idx for idx in range(len(train)) if train[idx]]\n testidx = [idx for idx in range(len(test)) if test[idx]]\n X_train, X_test, y_train, y_test = X[trainidx], X[testidx], Y[trainidx], Y[testidx]\n# print \"train\", X_train.shape, y_train.shape\n# print \"test\", X_test.shape, y_test.shape\n\n if len(frozenset(y_train)) == 1:\n # Skip training on this LOO set if there is only one y-value in the training set\n continue\n\n clf = fit_classifier(X_train, y_train, hyperparams)\n\n# print \"target\", y_test\n## print \"predict\", clf.predict(X_test)\n# print \"predict\", clf.predict_proba(X_test)\n## print \"df\", clf.decision_function(X_test)\n## print \"score\", clf.score(X_test, y_test)\n\n# y_test_predict = clf.predict_proba(X_test)\n y_test_predict = clf.predict(X_test)\n# print y_test_predict\n\n all_y_test.append(y_test)\n all_y_test_predict.append(y_test_predict)\n\n## print clf.best_estimator\n# print precision_score(y_test, y_test_predict)\n# print recall_score(y_test, y_test_predict)\n# print classification_report(y_test, y_test_predict)\n#\n#\n# assert y_test.shape == (1,)\n# assert y_test_predict.shape == (1,)\n# if y_test_predict[0] >= 1.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f >= 1. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1-1e-9\n# elif y_test_predict[0] <= 0.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f <= 0. 
!!!\" % y_test_predict[0]\n# y_test_predict[0] = 1e-9\n#\n# if y_test[0] == 1:\n# probtarget = y_test_predict[0]\n# else:\n# assert y_test[0] == 0\n# probtarget = 1-y_test_predict[0]\n## print \"probtarget\", probtarget\n## print y_test[0], y_test_predict[0], repr(probtarget)\n# nll = -math.log(probtarget)\n## print \"nll\", nll\n## print\n#\n# nlltotal += nll\n# nlltotal /= len(Y)\n## print \"nlltotal %f (alpha=%f, n_iter=%d)\" % (nlltotal, alpha, n_iter)\n# return nlltotal\n\n y_test = numpy.hstack(all_y_test)\n y_test_predict = numpy.hstack(all_y_test_predict)\n assert y_test.ndim == 1\n assert y_test_predict.ndim == 1\n assert Y.shape == y_test.shape\n assert y_test.shape == y_test_predict.shape\n# import plot\n# print \"precision_recall_fscore_support\", scikits.learn.metrics.precision_recall_fscore_support(y_test, y_test_predict)\n f1 = f1_score(y_test, y_test_predict)\n# print \"\\tf1 = %0.3f when evaluating with %s\" % (f1, hyperparams)\n# sys.stdout.flush()\n# precision, recall, thresholds = scikits.learn.metrics.precision_recall_curve(y_test, y_test_predict)\n# plot.plot_precision_recall(precision, recall)\n# print \"confusion_matrix\", scikits.learn.metrics.confusion_matrix(y_test, y_test_predict)\n# print \"roc_curve\", scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# fpr, tpr, thresholds = scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# print \"auc\", scikits.learn.metrics.auc(fpr, tpr)\n# plot.plot_roc(fpr, tpr)\n return f1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the component graph that shows all component connections
def component_graph(self):
    packages = self.packages()
    return self.graph_generator.generate(packages, self.services(packages)).export()
[ "def get_connected_components(self):\r\n # Reset the network.\r\n self.reset_network()\r\n\r\n # Keep track of the number of nodes visited.\r\n num_visited = 0\r\n\r\n # Make the result list of lists.\r\n components = []\r\n\r\n # Repeat until all nodes are in a connected component.\r\n while num_visited < len(self.all_nodes):\r\n # Find a node that hasn't been visited.\r\n start_node = None\r\n for node in self.all_nodes:\r\n if not node.visited:\r\n start_node = node\r\n break\r\n\r\n # Make sure we found one.\r\n assert start_node != None\r\n\r\n # Add the start node to the stack.\r\n stack = []\r\n stack.append(start_node)\r\n start_node.visited = True\r\n num_visited += 1\r\n\r\n # Add the node to a new connected component.\r\n component = []\r\n components.append(component)\r\n component.append(start_node)\r\n\r\n # Process the stack until it's empty.\r\n while len(stack) > 0:\r\n # Get the next node from the stack.\r\n node = stack.pop()\r\n\r\n # Process the node's links.\r\n for link in node.links:\r\n # Only use the link if the destination\r\n # node hasn't been visited.\r\n to_node = link.node1\r\n if not to_node.visited:\r\n # Mark the node as visited.\r\n to_node.visited = True\r\n\r\n # Mark the link as part of the tree.\r\n link.visited = True\r\n num_visited += 1\r\n\r\n # Add the node to the current connected component.\r\n component.append(to_node)\r\n\r\n # Push the node onto the stack.\r\n stack.append(to_node)\r\n\r\n # Return the components.\r\n return components", "def get_components(G):\n\n\n if nx.is_connected(G): return [from_nx_graph(G)]\n\n H_list = list()\n for cc in nx.connected_component_subgraphs(G):\n H_list.append(from_nx_graph(cc))\n\n return H_list", "def connectedComponents(G):\r\n # create empty list for visited nodes\r\n visited = []\r\n # create empty list for set of connected nodes\r\n cc = []\r\n \r\n # loop over nodes and mark them as unvisited\r\n for i in range(len(G.nodes)):\r\n visited.append(False)\r\n \r\n # loop over list of nodes\r\n nodes = list(G.nodes)\r\n for v in range(len(nodes)):\r\n\r\n # if not yet visited, check whether it is connected and make a subgraph of the connected set\r\n if visited[v] == False:\r\n temp = nx.MultiGraph() # create empty subgraph\r\n cc.append(DFSUtil(G, temp, v, visited, nodes)) # fill subgraph\r\n \r\n return cc", "def connections(self):\n if self._connections is None:\n # get connection pairs\n w = 10 if self.width == 24 else 11\n conn = [(anode, cathode) for cathode in range(12) for anode in [a for a in range(12) if a!= cathode][:w]]\n # arrange connection pairs in coordinate grid\n col_height, cols = (5, 24) if self.width == 24 else (11, 12)\n self._connections = [conn[col_height*i:col_height*i+col_height] for i in range(cols)]\n return self._connections", "def connected_components(self):\n\n comps = super(inference_dag, self).connected_components()\n\n # Build a dag representing the relationship between the connected components.\n comp_dag = dag()\n for d in range(0, len(comps)):\n comp_dag.add_vertex(self.vtype(d))\n isplit = [set(var_key_noarray(v).replace(\"_inref__split_\", \"\") for v in comps[d].roots() if \"_split_\" in var_key_noarray(v)) for d in range(0, len(comps))]\n osplit = [set(var_key_noarray(v).replace(\"_outref__split_\", \"\") for v in comps[d].leaves() if \"_split_\" in var_key_noarray(v)) for d in range(0, len(comps))]\n for d1 in range(0, len(comps)):\n for d2 in range(d1 + 1, len(comps)):\n if len(osplit[d1] & isplit[d2]) > 0:\n comp_dag.add_edge(edge(d1, d2))\n if 
len(osplit[d2] & isplit[d1]) > 0:\n comp_dag.add_edge(edge(d2, d1))\n\n # Order the components based on a topological sort of the component dag.\n return [comps[d] for d in comp_dag.topo_sort()]", "def find_connected_components(self):\n\n connected_comp_list = []\n\n visited = set()\n\n\n\n def dfs_traversal_recursive(vertex, visited_vertices, connected_vertices):\n print(f'Visiting vertex {vertex.get_id()}')\n\n visited_vertices.add(vertex)\n connected_vertices.append(vertex.get_id())\n\n # recurse for each vertex in neighbors\n for neighbor in vertex.get_neighbors():\n if neighbor not in visited_vertices:\n dfs_traversal_recursive(neighbor, visited_vertices, connected_vertices)\n return connected_vertices\n\n\n for vertex in self.get_vertices():\n if vertex not in visited:\n # visited.add(vertex)\n current_connected_vertices = []\n dfs_traversal_recursive(vertex, visited, current_connected_vertices)\n connected_comp_list.append(current_connected_vertices)\n\n\n return connected_comp_list", "def connected_components(graph):\n \n visited= []\n components = []\n for node in sorted(graph.nodes()): \n print(node)\n if node not in visited:\n temp = Search(graph,node)\n components.append(temp)\n for node in temp:\n visited.append(node)\n return components", "def connectedComponents(self): \n marked = {}\n belongsTo = {}\n for key in self.__Graph:\n marked[key] = False\n belongsTo[key] = None\n count = 0\n for key in self.__Graph:\n if not marked[key]: \n count+=1\n self.__dfsCC(key,marked,count,belongsTo)\n return count,belongsTo", "def connected_components(self) -> list: # list of lists\n nodes_that_left = [] # the keys of the nodes that doesn't belong to another connected_component\n connected_components = [] # list of all the connected_components in this graph\n for node in self.graph.nodes:\n nodes_that_left.append(node)\n while nodes_that_left:\n n = nodes_that_left[0]\n n_connected_component = self.connected_component(n) # the connected_component of n\n connected_components.append(n_connected_component)\n for key in n_connected_component:\n nodes_that_left.remove(key)\n return connected_components", "def cycles(self) -> List[GraphComponent]:\n return [\n compo\n for _, compo in self.tarjan_scc().items()\n if len(compo) > 1 or compo[0] in self.edges[compo[0]]\n ]", "def graph(self):\n return self.service_root.graph", "def get_connected_components(self):\n start_id = choice(list(self.__vertex_dict.keys()))\n\n # must be a list, can't be a set because random.choice does not it\n remaining_ids = list(self.__vertex_dict.keys())\n remaining_ids.remove(start_id) \n\n seen = set()\n seen.add(start_id)\n\n queue = deque()\n queue.append(self.get_vertex(start_id))\n\n components = []\n com = []\n while queue:\n v_obj = queue.pop()\n v_id = v_obj.get_id()\n com.append(v_id)\n\n neighbors = v_obj.get_neighbors()\n\n for n in neighbors:\n n_id = n.get_id()\n if n_id not in seen:\n seen.add(n_id)\n queue.appendleft(n)\n remaining_ids.remove(n_id)\n\n # if there is no vertex left in the queue\n if len(queue) == 0:\n components.append(com)\n # if there are no more components left to traverse through\n if len(remaining_ids) == 0:\n break\n com = []\n new_start = choice(remaining_ids)\n seen.add(new_start)\n queue.appendleft(self.get_vertex(new_start))\n remaining_ids.remove(new_start)\n\n return components", "def graph(self):\n return [env.graph for env in self._envs]", "def find_connected_components(self, instance):\n self.disk_graph_captors(instance)\n connected_components = 
nx.connected_components(self.disk_graph_com)\n connected_components = [[e for e in c] for c in connected_components]\n return connected_components", "def get_complex_components(self):\n return list(nx.connected_components(self.G_infected))", "def communicating_classes(self):\n return list(nx.strongly_connected_components(self._graph))", "def get_all(self):\n\t\treturn self.all_connections", "def graphs(self):\n\n return [v.graph for v in dict.values(self)]", "def __iter__(self):\n return iter(self._connector.graph_names())", "def label_connected_component_subgraphs(G):\n G = G.copy()\n for i, nodeset in enumerate(nx.connected_components(G)):\n for n in nodeset:\n G.nodes[n][\"subgraph\"] = i\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a sliding window (of width n) over data from the iterable s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...
def window(seq, n):
    it = iter(seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result
[ "def sliding_window(iterable, window_size=3, padded=False):\n\n # get an iterator from the iterable (df row)\n i = iter(iterable.index)\n\n # prepare an empty array for the window\n win = [0] if padded else []\n\n # fill the window with prev, current and next elements\n for e in range(0, window_size - (1 if padded else 0)):\n win.append(next(i))\n\n # yield the window\n yield win\n\n # for all elements in the iterator\n for e in i:\n # keep the last two elements in the window\n # append the next element in the iterator\n win = win[1:] + [e]\n\n # return a new window\n yield win\n\n if padded:\n yield win[1:] + [999999999999]", "def window_1(sequence, n):\n items = []\n for i in range(len(sequence)):\n if i+n <= len(sequence):\n items.append(tuple(sequence[i:i+n]))", "def Window(iterable, n=2):\n it = iter(iterable)\n win = cl.deque(it >> Take(n), n)\n yield tuple(win)\n for e in it:\n win.append(e)\n yield tuple(win)", "def window(seq, winsize, winstep):\r\n assert winsize >= winstep, \"Window step must me at most window size.\"\r\n gen = islice(window_1(seq, n=winsize), None, None, winstep)\r\n for result in gen:\r\n yield list(result)", "def window(iter, pre_size=1, post_size=1):\n pre_iter, iter = itertools.tee(iter)\n pre_iter = itertools.chain((None,) * pre_size, pre_iter)\n pre_iter = nwise(pre_iter, pre_size)\n post_iter, iter = itertools.tee(iter)\n post_iter = itertools.chain(post_iter, (None,) * post_size)\n post_iter = nwise(post_iter, post_size)\n next(post_iter, None)\n return zip(pre_iter, iter, post_iter)", "def window_iter(x, size):\n it = iter(x)\n window = collections.deque(itertools.islice(it, size) , maxlen=size)\n if len(window) > 0:\n yield tuple(window)\n else:\n return\n\n for element in it:\n window.popleft()\n window.append(element)\n yield tuple(window)", "def sliding_window(image, step_size, window_size):\n # slide a window across the image\n for y in range(0, image.shape[0], step_size):\n for x in range(0, image.shape[1], step_size):\n # yield the current window\n yield (x, y, image[y:y + window_size[1], x:x + window_size[0]])", "def sliding_chunker(df, window_len, slide_len):\n chunks = []\n for pos in range(0, len(df), slide_len):\n chunk = df.iloc[pos:pos+window_len].copy()\n if (len(chunk) != window_len):\n continue\n chunks.append(chunk)\n return chunks", "def slidingw(items: Iterable[T], size: int, wrap: bool = False) -> Iterable[tuple[T, ...]]:\n buffer = []\n first_items = None\n\n for item in items:\n buffer.append(item)\n if len(buffer) == size:\n yield tuple(buffer)\n del buffer[0]\n elif wrap and len(buffer) == size - 1:\n first_items = list(buffer)\n\n if wrap and first_items:\n for item in first_items:\n buffer.append(item)\n yield tuple(buffer)\n del buffer[0]", "def sliding_window_data(data_x, chunk_size):\n\tbig_X = []\n\tdata_temp = array_to_list(data_x)\n\tidx_len_tuples = list(range(len(data_temp[0]) - chunk_size+1)) if ids==[] else ids\n\n\tfor i in list(range(len(data_temp[0]) - chunk_size+1)):\n\t\tx = []\n\t\tfor s, sequence in enumerate(data_temp):\n\t\t\tx.append(sequence[i:i+chunk_size])\n\t\tbig_X.append(np.array(x))\n\n\treturn big_X, idx_len_tuples", "def window1d(x, size, step=1):\n return np.lib.stride_tricks.as_strided(x, shape=(x.shape[0]//step - int(np.ceil(size/step)) + 1, size), strides=(x.strides[0]*step,x.strides[0]))", "def window(X, N):\n output = [X[i:i + N] for i in range(len(X) - N)]\n return np.asarray(output).reshape(-1, N), X[N:]", "def window(func,a, window=None):\r\n if window==None: window=256\r\n out = 
[func(a[x:x+(window-1)]) for x in range(len(a))]\r\n return out", "def window_iter(iterable: Iterable[T], size: int = 2,\n extend: bool = False) -> Iterator[Tuple[T]]:\n window = deque(maxlen=size)\n iterator = iter(iterable)\n extension = []\n\n for item in iterator:\n window.append(item)\n if len(window) >= size:\n yield tuple(window)\n break\n if extend:\n extension.append(item)\n\n for item in chain(iterator, extension):\n window.append(item)\n yield tuple(window)", "def windows(self, window_size=datetime.timedelta(0, 3600)):\r\n for base in self.observations:\r\n start = base.time\r\n end = start + window_size\r\n\r\n oblist = []\r\n for obs in self.observations:\r\n if obs.time >= start and obs.time < end:\r\n oblist.append(obs)\r\n\r\n yield tuple(oblist)", "def get_windows(self, window):\n assert self.num_splits > 1, \"\"\"num_splits should be > 1\"\"\"\n for slice_idx in np.linspace(\n start=self.prediction_length, stop=len(window), num=self.num_splits\n ):\n\n yield window[: int(round(slice_idx))]", "def sliding_window_m_np(xs, window):\n\n rows = xs.shape[0] - window + 1\n\n f = partial(onp.roll, xs, axis=0)\n wide = onp.vstack(list(map(f, -onp.arange(rows))))\n return wide[:, :window]", "def _data_windows(\n data: torch.Tensor, *, window_length: int, batch_size: int\n ) -> Generator[torch.Tensor, None, None]:\n\n data_windows = []\n for i in range(0, data.shape[0] - window_length, window_length):\n window_slice = slice(i, i + window_length)\n data_windows.append(data[window_slice])\n if (i + 1) % batch_size == 0 or i == data.shape[0] - window_length:\n yield torch.stack(data_windows, dim=1)\n data_windows = []", "def window_df(df, segment_len=32, slide_len=2):\n # Removing n oldest rows so segments divide evenly into df rows\n to_remove = len(df) % segment_len\n if (to_remove > 0):\n df = df.iloc[to_remove:]\n print(f'Dropped {to_remove} row(s) from the beginning')\n\n segments = []\n for start_pos in range(0, len(df), slide_len):\n end_pos = start_pos + segment_len\n segment = df[start_pos:end_pos].copy()\n if len(segment) != segment_len:\n continue\n segments.append(segment)\n return (df.reset_index(), segments)", "def slidingWindow(src_img, step_size, window_size, scale_factor, min_size, max_size):\n scale = 1\n count = 0\n images = []\n coords = []\n \n img_height = src_img.shape[0]\n img_width = src_img.shape[1]\n \n # increase scale to match minimum window size\n while window_size[0] * scale < min_size[0] or window_size[1] * scale < min_size[1]:\n scale *= scale_factor\n \n # sliding window\n while window_size[0] * scale <= max_size[0] and window_size[1] * scale <= max_size[1]:\n \n scaled_window = (int(window_size[0] * scale), int(window_size[1] * scale))\n \n for y in xrange(0, img_height, step_size):\n for x in xrange(0, img_width, step_size):\n \n if x + scaled_window[0] > img_width or y + scaled_window[1] > img_height:\n continue\n \n img = src_img[y : y + scaled_window[1], x : x + scaled_window[0]]\n \n if scaled_window != window_size:\n img = cv2.resize(img, window_size)\n \n images.append(img)\n coords.append(Rect(x, y, scaled_window[0], scaled_window[1]))\n \n count += 1\n \n if count > 2048:\n yield numpy.array(images).reshape([count, window_size[0] * window_size[1]]), coords\n images = []\n coords = []\n count = 0\n \n \n if count > 0:\n yield numpy.array(images).reshape([count, window_size[0] * window_size[1]]), coords\n images = []\n coords = []\n count = 0\n \n scale *= scale_factor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
insert user normal attributes in "normal_users" table
def insertNormalUserAttrsQuery(self, user_id, normal_username, normal_password):
    return ibs_db.createFunctionCallQuery("insert_normal_user", (user_id, dbText(normal_username), dbText(normal_password)))
[ "def updateNormalUserAttrsQuery(self,user_id,normal_username,normal_password):\n return ibs_db.createFunctionCallQuery(\"update_normal_user\",(user_id, dbText(normal_username), dbText(normal_password)))", "def insert_info(self):\n\t\tself.get_connection()\n\t\tc = self.conn.cursor()\n\t\t#For every USER in the dictionary\n\t\tfor i in range(len(self.info['USER'])):\n\t\t\t#insert them into the database (I <3 SQL INJECTIONS)\n\t\t\tc.execute(\"INSERT OR REPLACE INTO '{tn}' ('{user}', '{dp}', '{g}', '{f}', '{l}', '{e}') VALUES ('{idv}', '{dpv}', '{gv}', '{fv}', '{lv}', '{ev}');\".\\\n\t\t\t\tformat(tn=self.tn, user=self.user, dp=self.dp, g=self.g, f=self.f, l=self.l, e=self.e,\n\t\t\t\t\tidv=self.info['USER'][i], dpv=self.info['pass'][i], gv=self.info['group'][i],\n\t\t\t\t\t fv=self.info['first'][i], lv=self.info['last'][i], ev=self.info['email'][i]))\n\t\t#Log this datbase manipulation\n\t\tself.log_users_creation()\n\t\t#commit to database and close connection\n\t\tself.commit_db()", "def initialize_users_table(self):\n self.execute_queries(queryutils.sql.INIT_USERS[self.dbtype])", "def create_user_mappings(self):\n # This is terribly inefficient, because we'll likely be doing\n # this for a number of users at once.\n UserCategory.objects.get_or_create(user=self.user, category=self.category)\n\n # Enroll the user in the Goals\n goals = self.goals.all()\n for goal in goals:\n ug, _ = UserGoal.objects.get_or_create(user=self.user, goal=goal)\n ug.primary_category = self.category\n ug.save()\n\n # Enroll the User in the Actions\n actions = Action.objects.published().filter(goals__in=goals)\n actions = actions.distinct()\n for action in actions:\n ua, _ = UserAction.objects.get_or_create(user=self.user, action=action)\n ua.primary_category = self.category\n ua.primary_goal = ua.get_primary_goal()\n ua.save()", "def insert_users(self):\n print(\"Adding users\")\n users_ids = self.fs_helper.get_all_ids()\n docs = []\n for user_id in users_ids:\n has_label = self.fs_helper.has_labels(user_id)\n doc = {\n \"_id\": str(user_id),\n \"has_labels\": has_label,\n }\n docs.append(doc)\n collection = self.db['user']\n collection.insert_many(docs, ordered=False)", "def insert_user(self, user, uid):\n self.execute(\"INSERT INTO users \\\n (id, name, case_id, user_type) \\\n VALUES (\" + \",\".join([self.wildcard]*4) + \")\", \n (uid, user.name, user.case_id, user.user_type))\n self.commit()", "def initalize_user_tables(db):\n \n from shotglass2.users.models import init_db as users_init_db \n users_init_db(db)", "def load_users():\n\twith open('./seed_data/users.tsv','r+') as data:\n\t\tfor row in data:\n\t\t\trow = row.rstrip()\n\t\t\tuser_name, password, user_type_id = row.split(\"\\t\")\n\n\t\t\tuser= Users(user_name=user_name,\n\t\t\t\t\t\tpassword=password,\n\t\t\t\t\t\tuser_type_id= user_type_id)\n\t\t\tdb.session.add(user)\n\t\t\tdb.session.commit()", "def create_user(self, user):\n sql =(\n \"\"\"INSERT INTO users (full_name , email, password, contact, user_role) \n VALUES('{}','{}','{}','{}','{}');\n \"\"\".format(user.full_name, user.email, \n generate_password_hash(user.password), user.contact, user.user_role)\n )\n self.cur.execute(sql)\n self.conn.commit()", "def put_user(self, user):\n\t_result = self.connection.query(\n\t \"\"\"INSERT INTO users (email, password, title, first_name,\n\t last_name, affiliation, phone_number, fax_number, street,\n\t postal_code, city, state, country, sys_role) VALUES (%s)\"\"\" %\n\t (user.as_query_string()))", "def _MakeBasicUser(self, user):\n return 
user['primaryEmail'], user['id'], user['name']['fullName']", "def set_user_attr(u,attr,val):\n\n\t# sanity check the attribute we were asked to set\n\tif attr not in users_schema:\n\t\tprint(\"That attribute does not exist!\")\n\t\treturn 400\n\n\t# try to set the value\n\ttry:\n\t\tusers.execute('''\n\t\t\t\tUPDATE users\n\t\t\t\tSET '''+attr+'''=?\n\t\t\t\tWHERE UUUID=?;\n\t\t\t''',(val,u)\n\t\t\t#| doing string catenation in SQL would normally be insecure,\n\t\t\t#| but we validate the attribute requested againt a list of valid attributes so it's hopefully fine\n\t\t\t#| (also this is literally the only way to have a variable field be substituted, otherwise we get a syntax error)\n\t\t)\n\texcept BaseException as e:\n\t\tprint(e)\n\t\tprint(\"A fatal error occured while trying to set the value\")\n\t\treturn 500\n\n\t# save our changes\n\tusers_conn.commit()\n\n\t# http 200 okay\n\treturn 200", "def update_users():\n\tfor user in User.query.all():\n\t\tadd_or_update_user( user.name)", "def _populate_and_save_user_profile(self):\n try:\n profile = models.UserProfile.objects.get(user=self._user)\n self._populate_profile_fields(profile)\n\n if len(ldap_settings.AUTH_LDAP_USER_ATTR_MAP) > 0:\n profile = self._populate_profile_fields(profile)\n profile.save()\n except (SiteProfileNotAvailable, ObjectDoesNotExist), e:\n profile = models.UserProfile(user=self._user,\n role=models.UserProfile.ROLE_USER,\n ldap_user=True)\n \n\n profile = self._populate_profile_fields(profile)\n\n profile.save()", "def create_user_profiles(apps, schema_editor):\n User = apps.get_model(\"auth\", \"User\")\n UserProfile = apps.get_model('elvis', 'UserProfile')\n\n for user in User.objects.all():\n new_profile = UserProfile(user=user)\n new_profile.save()", "def load_users():\n\n lines = [line.rstrip('\\n') for line in open(\"seed_data/users.csv\")]\n\n for line in lines: \n column_data = line.split(\",\")\n line = User(user_id=column_data[0], user_name=column_data[1], password=column_data[2], email=column_data[3], phone=column_data[4], mile_time=column_data[5])\n db.session.add(line)\n db.session.commit()", "def _load_schema_users(self):\n users = table_abstraction.TableAbstraction('user')\n users.add_column_row_id_alias()\n users.add_column_string('email', nullable=False, unique=True, indexed=True)\n users.add_column_string('password', nullable=False, unique=False, indexed=False)\n #users.add_column_string('meta_data', nullable=True, unique=False, indexed=False)\n return users", "def normalizeUsers():\n usersList = files.readUsers()\n newUsersList = {}\n\n i = 1\n newUid = 1\n for hashId, user in usersList.iteritems():\n uid = user.uid\n user.uid = str(newUid)\n location = user.location\n if location['name'] != \"\" and 'lat' not in location:\n if isinstance(location['name'], unicode):\n location = location['name'].encode('utf-8')\n else:\n location = location['name']\n\n mapInfo = PBAMap.getLatLong(location, i)\n i += 1\n if mapInfo == 'apiLimit':\n print str(i) + \" At daily API limit. 
Update script and repeat tomorrow\"\n elif mapInfo != '':\n user.location = {\n 'name': location,\n 'lat': mapInfo['lat'],\n 'lng': mapInfo['lng'],\n }\n if 'country' in mapInfo:\n user.location['country'] = mapInfo['country']\n print str(i), user.location\n else:\n print str(i), \"checked: none\"\n user.location = {'name': ''}\n newUid += 1\n newUsersList[hash(str(uid))] = user\n\n writeJSONFile('../data/users.json', newUsersList)\n print \"User ids, usernames, and locations updated\\n\"", "def import_users(cursor):\n print(\"*** Inserting Users ***\")\n id = 1\n dataset = DataParser.get_dataset()\n with open(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), \"data\",\n dataset, \"users.dat\")) as f:\n for line in f:\n splitted = line.replace(\"\\n\", \"\").split(\" \")\n num_articles = int(splitted[0])\n\n cursor.execute(\"insert into users(id) values(%s)\" % id)\n for i in range(1, num_articles + 1):\n if dataset == 'citeulike-t':\n article_id = int(splitted[i])\n elif dataset == 'citeulike-a':\n article_id = int(splitted[i]) + 1\n cursor.execute(\"insert into articles_users(user_id, article_id) values(%s, %s)\", (id, article_id))\n id += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update user normal attributes in "normal_users" table
def updateNormalUserAttrsQuery(self, user_id, normal_username, normal_password):
    return ibs_db.createFunctionCallQuery("update_normal_user", (user_id, dbText(normal_username), dbText(normal_password)))
[ "def insertNormalUserAttrsQuery(self,user_id,normal_username,normal_password):\n return ibs_db.createFunctionCallQuery(\"insert_normal_user\",(user_id, dbText(normal_username), dbText(normal_password)))", "def update_users():\n\tfor user in User.query.all():\n\t\tadd_or_update_user( user.name)", "def set_user_attr(u,attr,val):\n\n\t# sanity check the attribute we were asked to set\n\tif attr not in users_schema:\n\t\tprint(\"That attribute does not exist!\")\n\t\treturn 400\n\n\t# try to set the value\n\ttry:\n\t\tusers.execute('''\n\t\t\t\tUPDATE users\n\t\t\t\tSET '''+attr+'''=?\n\t\t\t\tWHERE UUUID=?;\n\t\t\t''',(val,u)\n\t\t\t#| doing string catenation in SQL would normally be insecure,\n\t\t\t#| but we validate the attribute requested againt a list of valid attributes so it's hopefully fine\n\t\t\t#| (also this is literally the only way to have a variable field be substituted, otherwise we get a syntax error)\n\t\t)\n\texcept BaseException as e:\n\t\tprint(e)\n\t\tprint(\"A fatal error occured while trying to set the value\")\n\t\treturn 500\n\n\t# save our changes\n\tusers_conn.commit()\n\n\t# http 200 okay\n\treturn 200", "def edit_user(self, user: User, attribute: dict[str, Any]) -> None:\n\t\tpass", "def test_update_not_saved(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n\n user._update_base_attributes()", "def update_user_info():\n\n email = session.get('email')\n\n new_buying_power = request.form.get('buying-power')\n\n this_user = User.query.filter_by(email=email).first()\n # print(\"before update\", this_user)\n this_user.buying_power = new_buying_power\n # print(\"After update\", this_user)\n db.session.commit()\n \n return 'New information updated.'", "def test_base_attributes_modified(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n user._uuid = 'myuuid'\n user.clear()\n\n # Prepare response of MockedServer\n url = '/identityAPI/updateUserByUUID'\n code = 200\n user_xml = build_bonita_user_xml(uuid='myuuid', password='mypassword', username='other_usernames')\n BonitaServer.set_response_list([[url, code, user_xml]])\n\n # Modify some base attributes\n user.last_name = u'last_name'\n user.title = u'Doctor'\n user.username = u'other_username'\n user.first_name = u'first_name'\n user.job_title = u'job_title'\n\n user._update()\n\n assert user.is_modified is False\n assert user.last_name == u'last_name'\n assert user.title == u'Doctor'\n assert user.username == u'other_username'\n assert user.first_name == u'first_name'\n assert user.job_title == u'job_title'", "def update_attributes_user(self, username, attributes):\n request = Request(\n method=\"put\",\n endpoint=\"/user/{}/attributes\".format(username),\n data=attributes,\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise UpdateAttributes(resp, request)\n else:\n return resp.body\n\n return self._execute(request, response_handler, custom_prefix=\"/_api\")", "def updateUserProfile(self, username, adminTokenId, data):\r\n try:\r\n endpoint = \"/openam/json/users/\" + str(username)\r\n\r\n headers = {\r\n \"Accept\": \"application/json\",\r\n \"Content-type\": \"application/json\",\r\n \"iplanetDirectoryPro\": str(adminTokenId),\r\n }\r\n\r\n address = ''\r\n address += data['address'] + \", \" if 'address' in data else ''\r\n address += data['postcode'] + \", \" if 'postcode' in data else ''\r\n address += data['city'] + \", \" if 'city' in data else ''\r\n address += data['country'] if 'country' in data else ''\r\n\r\n gender = 'm' if 
data['gender'] == 'M' else 'f'\r\n\r\n d = {\r\n \"mail\": data[\"mail\"],\r\n \"sn\": data['surname'],\r\n \"givenName\": data[\"name\"],\r\n 'telephoneNumber': data['phone'] if 'phone' in data else None,\r\n \"postalAddress\": address,\r\n \"sunIdentityServerPPCommonNameSN\": data['surname'],\r\n \"sunIdentityServerPPCommonNameFN\": data['name'],\r\n \"sunIdentityServerPPLegalIdentityGender\": \"urn:liberty:id-sis-pp:gender:\" + str(gender),\r\n \"sunIdentityServerPPDemographicsBirthDay\": None,\r\n \"sunIdentityServerPPAddressCard\": address if address != \"\" else None,\r\n \"sunIdentityServerPPDemographicsDisplayLanguage\": data['language'] if 'language' in data and data['language'] != \"\" else [],\r\n \"sunIdentityServerPPLegalIdentityVATIdValue\": data[\"vat\"] if 'vat' in data and data['vat'] != \"\" else []\r\n }\r\n payload = json.dumps(d, separators=(',', ':'), indent=4)\r\n\r\n # Request\r\n conn = httplib.HTTPConnection(self.base)\r\n conn.request(\"PUT\", endpoint, payload, headers)\r\n\r\n # Response\r\n response = conn.getresponse()\r\n return response.status, response.read()\r\n\r\n except Exception, e:\r\n if settings.DEBUG:\r\n print_exc()\r\n return 500, str(e)", "def deleteNormalUserAttrsQuery(self,user_id):\n return ibs_db.createFunctionCallQuery(\"delete_normal_user\",(user_id,))", "def normalizeUsers():\n usersList = files.readUsers()\n newUsersList = {}\n\n i = 1\n newUid = 1\n for hashId, user in usersList.iteritems():\n uid = user.uid\n user.uid = str(newUid)\n location = user.location\n if location['name'] != \"\" and 'lat' not in location:\n if isinstance(location['name'], unicode):\n location = location['name'].encode('utf-8')\n else:\n location = location['name']\n\n mapInfo = PBAMap.getLatLong(location, i)\n i += 1\n if mapInfo == 'apiLimit':\n print str(i) + \" At daily API limit. 
Update script and repeat tomorrow\"\n elif mapInfo != '':\n user.location = {\n 'name': location,\n 'lat': mapInfo['lat'],\n 'lng': mapInfo['lng'],\n }\n if 'country' in mapInfo:\n user.location['country'] = mapInfo['country']\n print str(i), user.location\n else:\n print str(i), \"checked: none\"\n user.location = {'name': ''}\n newUid += 1\n newUsersList[hash(str(uid))] = user\n\n writeJSONFile('../data/users.json', newUsersList)\n print \"User ids, usernames, and locations updated\\n\"", "def update_user(self, user_data):\n # extract the query filter\n query_filter = {\"username\": user_data['target_user']}\n del user_data[\"target_user\"]\n # inject the updated time into the record\n user_data['last_updated'] = int(time.time())\n if 'password' in user_data:\n user_data['password'] = HelperModel.hash_string(user_data['password'])\n user_data = {\"$set\": user_data}\n return self.mongo_toolbox.update_one_record(query_filter, user_data)", "def set_account_information(self, user_id, req):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n UPDATE Users\n SET \n username = ?,\n email = ?,\n fName = ?,\n lName = ?,\n streetAddress = ?,\n city = ?,\n state = ?,\n postCode = ?\n WHERE\n id = ?\n \"\"\",\n (\n req['username'],\n req['email'],\n req['fName'],\n req['lName'],\n req['streetAddress'],\n req['city'],\n req['state'],\n req['postCode'],\n user_id\n )\n )\n self.db.commit()\n except sqlite3.Error as e:\n log.error(e)\n raise Exception", "def user_update(wp_users, members, new_users, remove_users):\n with open(wp_users, \"rt\") as fh:\n wp_keys, wp_users = get_wordpress_data(fh)\n wp_username_col = wp_keys.index(\"Choose a Username\")\n wp_usernames = [user[wp_username_col].lower() for user in wp_users.values()]\n with open(members, \"rt\") as fh:\n roster_keys, roster_users = get_members_and_adults_from_csv(fh)\n new_emails = [em for em in roster_users.keys() if em not in wp_users.keys()]\n remove_emails = [em for em in wp_users.keys() if em not in roster_users.keys()]\n with open(new_users, \"wt\") as fh:\n for em in new_emails:\n username = roster_users[em][roster_keys.index(\"Member: Last Name\")]\n if username.lower() not in wp_usernames:\n fh.write(f\"{username}, {em}{os.linesep}\")\n with open(remove_users, \"wt\") as fh:\n for em in remove_emails:\n fh.write(em)\n fh.write(os.linesep)", "def update_users():\n if request.user.user_id != local.model.root_user_id:\n abort(httplib.FORBIDDEN, \"Only root user can update other users.\")\n\n users = AuthenticatedUserSchema(\n strict=True, many=True\n ).load(request.json, partial=True).data\n\n if len(users) != 1:\n abort(httplib.BAD_REQUEST, \"Users can only be updated on at a time.\")\n\n local.model.update_user_info(users[0])\n\n # Return updated users\n users = local.model.get_users(user_ids=[users[0]['user_id']])\n return AuthenticatedUserSchema(many=True).dump(users).data", "def update_in_db(self, data):\n UserModel.query.filter_by(id=self.id).update(data)\n db.session.commit()", "def modify_users(self, user_list):\n return self.user_manager.modify_objects(user_list)", "async def on_user_update(self, before: Member, after: Member):", "def update_user_details(secateur_user):\n account = secateur_user.account\n\n get_user.delay(secateur_user.pk, user_id=account.pk).forget()\n\n ## I'm not convinced I need to update these, and any secateur user\n ## might have a lot of them.\n twitter_update_mutes(secateur_user)\n twitter_update_blocks(secateur_user)\n\n ## Definitely need this one.\n twitter_update_friends(secateur_user)\n ## TODO: 
Add twitter list support." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
delete user normal attributes from "normal_users" table
def deleteNormalUserAttrsQuery(self, user_id):
    return ibs_db.createFunctionCallQuery("delete_normal_user", (user_id,))
[ "def remove_all_attributes_user(self, username):\n request = Request(\n method=\"delete\", endpoint=\"/user/{}/attributes/truncate\".format(username)\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise RemoveAllAttributes(resp, request)\n else:\n return resp.body\n\n return self._execute(request, response_handler, custom_prefix=\"/_api\")", "def remove_attr(self, user, key):\n query = \"DELETE FROM attributes WHERE attr_uid = ? AND attr_key = ?\"\n with self._db_access_lock, sqlite.connect(self._dbfile) as conn:\n conn.execute(query, (user, key))", "def cleanup_and_delete( self ):\n # Set up unique properties to delete\n uniques = [ ]\n key = '%s.%s:%s' % ( self.__class__.__name__, 'username',\n self.username_lower )\n uniques.append( key )\n if self.email_pending is not None:\n key = '%s.%s:%s' % ( self.__class__.__name__, 'email',\n self.email_pending_lower )\n uniques.append( key )\n if( self.email_verified is not None\n and self.email_verified_lower != self.email_pending_lower ):\n key = '%s.%s:%s' % ( self.__class__.__name__, 'email',\n self.email_verified_lower )\n uniques.append( key )\n for auth_id in self.auth_ids:\n key = '%s.%s:%s' % ( self.__class__.__name__, 'auth_id',\n auth_id )\n uniques.append( key )\n\n # Delete uniques then delete the user entity from the datastore\n self.unique_model.delete_multi( uniques )\n self.key.delete( )", "def delete_user(self):\n \n User.user_list.remove(self)", "def clear_table_user_info(jwt_auth, username):\n assert jwt_auth.delete('userinfo/' + username).status_code == 200", "def deleteUser(self):\r\n os.system(\"attrib -h -s -r \" + tempfile.gettempdir() + r\"\\temp.temp\")\r\n os.system(\"del \" + tempfile.gettempdir() + r\"\\temp.temp\")", "def updateNormalUserAttrsQuery(self,user_id,normal_username,normal_password):\n return ibs_db.createFunctionCallQuery(\"update_normal_user\",(user_id, dbText(normal_username), dbText(normal_password)))", "def delete_User(self):\n User.user_lst.remove(self)", "def delete_user():\n os.remove(_user_path())", "def delete_user(u):\n db.engine.execute(\"DELETE FROM usersfts WHERE username='{}'\".format(u.username))\n db.session.delete(u)\n db.session.commit()", "def drop_user_table(self):\n self.cursor.execute(\"\"\"DROP TABLE IF EXISTS diary_users CASCADE\"\"\")\n self.cursor.close()\n self.conn.commit()\n self.conn.close()", "def remove(self, user):\n self.connect()\n try:\n sql = \"\"\"delete from {0} where userName = \"{1}\" and password = \"{2}\" \"\"\".format(\n self.tablename, user.userName, user.password\n )\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as err:\n print(err)\n finally:\n self.disconnect()", "def clean_users(self):\n for user in list(db.users.find()):\n views = list(db.views.find({\"user\": user}))\n if len(views) == 0:\n db.users.remove(user)", "def removefsuser(self, username):", "def remove_attribute_user(self, username, attributeid):\n request = Request(\n method=\"delete\",\n endpoint=\"/user/{}/attributes/{}\".format(username, attributeid),\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise RemoveAttribute(resp, request)\n else:\n return resp.body\n\n return self._execute(request, response_handler, custom_prefix=\"/_api\")", "def remove_user():\r\n user_input = input(\"| Enter the name of the User |\")\r\n aduser.ADUser.from_cn(user_input).delete()\r\n return \"| User removed |\"", "def delallgroupuser(self, username):\n sql = \"DELETE FROM `%s` WHERE `%s`=%%s;\" % (\n self.config.get('tables', 'grouplist', 
fallback='grouplist'),\n self.config.get('fields', 'username', fallback='username'))\n\n with self.dbs.cursor() as cur:\n cur.execute(sql, username)", "def unapply(self, user):\n raise NotImplementedError", "def clearPassword(self,user):\n user.salt = None\n user.password_hash = None\n self.session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether each username in the normal_username multi-str arg already exists and doesn't contain invalid characters. current_username holds the usernames currently belonging to this user, so we don't end up printing an error for a username that already belongs to this user.
def checkNormalUsernameForAdd(self, request):
    request.needAuthType(request.ADMIN)
    request.checkArgs("normal_username", "current_username")
    request.getAuthNameObj().canChangeNormalAttrs(None)
    usernames = self.__filterCurrentUsernames(request)
    bad_usernames = filter(lambda username: not _checkNormalUsernameChars(username), usernames)
    exist_usernames = normalUsernameExists(usernames)
    return self.__createCheckAddReturnDic(bad_usernames, exist_usernames)
[ "def clean_username(self):\n if not alnum_re.search(self.cleaned_data['username']):\n raise forms.ValidationError('Usernames can only contain letters, numbers and underscores')\n try:\n user = User.objects.get(username__exact=self.cleaned_data['username'])\n except User.DoesNotExist:\n return self.cleaned_data['username']\n raise forms.ValidationError('This username is already taken. Please choose another.')", "def validateUsername(username):\n return username and settings.RE_USERNAME.match(username)", "def usernamecheck():\n\t\tif (username == \"NULL\"):\n\t\t\tprint(\"Invalid username: Cannot use 'NULL' as a username.\\n\")\n\t\t\treturn False\n\t\tif (username == \"Guest\"):\n\t\t\t\tprint(\"Invalid username: Cannot use 'Guest' as a username.\\n\")\n\t\t\t\treturn False\n\n\t\tif re.match(\"^[a-zA-Z0-9_]{5,}$\", username):\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Invalid username: Must be at least 5 characters, and can only contain letters, numbers, or underscores.\\n\")\n\t\t\treturn False", "def is_valid_username(username):\n return not re.search(\"[^A-Za-z0-9]\", username)", "def test_some_names(self, _, __, username, realname, success,):\n try:\n validate_username(username, realname)\n except ValidationWarning as ex:\n if success:\n pytest.fail(\n 'Received unexpected error: {error}'.format(error=ex),\n )", "def username_is_valid(username):\n valid = False\n to_check = username\n\n if isinstance(to_check, str):\n to_check = to_check.lower().strip()\n\n if to_check:\n good = re.sub(r'[^a-z0-9#!]', '', to_check)\n\n if good == to_check and len(good) <= 20:\n valid = True\n\n return valid", "def validate_shibboleth_username(value):\n # TODO add some validation based on shibboleth uid\n pass", "def is_valid_username(username):\n return is_valid_email(username)", "def validate_username(username):\n for c in username:\n if c.isalnum() == False and c != '_':\n return False\n \n return bool(username and re.match('^\\w{2,30}$', username))", "def name_taken(self, username: str) -> bool:\n\t\treturn username in self.__user", "def hint_username(username):\n if len(username) < 3:\n print(\"Invalid username. Must be at least 3 characters long\")", "def check_username(self):\n\n self.wfile.write(\"Hello! Please enter a username (max 20 characters): \")\n\n user_ok = False\n while not user_ok:\n #Store the username requested by the client\n username = self.rfile.readline().strip()\n \n user_list = []\n #Check against file list of existing users\n with open(\"usernames.log\", \"a+\") as username_file:\n user_list = [line.rstrip().split(\"@\")[0] for line in username_file]\n if username in user_list:\n #Tell client to retry with new username\n self.wfile.write(\"USERNAME IN USE\")\n self.wfile.write(\"Username already in use. 
Try another: \")\n else:\n #Add new user to the file list of users\n user_ok = True\n username_file.write(\"{0}@{1}:{2}\\n\".format(username, self.client_address[0], self.client_address[1]))\n print \"User {0}@{1}:{2} added.\".format(username, self.client_address[0], self.client_address[1])\n self.wfile.write(\"OK\")\n\n return username", "def collect_username(self, message=None):\n if not (message is None):\n print(message)\n username = input(\n f'{bcolors.OKBLUE}Please enter your username: {bcolors.ENDC}')\n while len(username) > MAX_LENGTH_USERNAME or len(username) < MIN_LENGTH_USERNAME:\n username = input(f'{bcolors.WARNING}Username must be between {MIN_LENGTH_USERNAME} and {MAX_LENGTH_USERNAME} characters{bcolors.ENDC}'\n f'\\n{bcolors.OKBLUE}Please enter you username: {bcolors.ENDC}')\n return username", "def validate_usernames(self, input_names):\n user_names = []\n for name in input_names:\n logger.debug('name = %s', name)\n if not isinstance(name, str):\n raise TypeError(f'{repr(name)} is not a string')\n if '|' in name:\n raise ValueError(f'\"|\" in user name: {name}')\n if not self._is_ip_address(name.strip()):\n user_names.append(name)\n\n invalid_names = set()\n normalized_missing_names = set()\n for input_chunk in chunked(user_names, MAX_USUSER):\n api_result = self.site.api('query', list='users', ususers='|'.join(input_chunk))\n output_chunk = api_result['query']['users']\n for output_data in output_chunk:\n if 'invalid' in output_data:\n invalid_names.add(output_data['name'])\n elif 'missing' in output_data:\n normalized_missing_names.add(output_data['name'])\n\n result = set()\n for name in user_names:\n if self.normalize_username(name) in normalized_missing_names:\n result.add(name)\n elif name in invalid_names:\n result.add(name)\n\n return result", "def test_username_non_unique(self):\n errors = checks.run_checks()\n self.assertEqual(\n errors,\n [\n checks.Error(\n \"'CustomUserNonUniqueUsername.username' must be \"\n \"unique because it is named as the 'USERNAME_FIELD'.\",\n obj=CustomUserNonUniqueUsername,\n id=\"auth.E003\",\n ),\n ],\n )\n with self.settings(AUTHENTICATION_BACKENDS=[\"my.custom.backend\"]):\n errors = checks.run_checks()\n self.assertEqual(\n errors,\n [\n checks.Warning(\n \"'CustomUserNonUniqueUsername.username' is named as \"\n \"the 'USERNAME_FIELD', but it is not unique.\",\n hint=(\n \"Ensure that your authentication backend(s) can handle \"\n \"non-unique usernames.\"\n ),\n obj=CustomUserNonUniqueUsername,\n id=\"auth.W004\",\n ),\n ],\n )", "def validate_username(username, check_exists=False):\n\n if username_reserved(username):\n raise ValueError('Username is reserved')\n\n if not 3 <= len(username) <= 8:\n raise ValueError('Username must be between 3 and 8 characters')\n\n if not all(c.islower() for c in username):\n raise ValueError('Username must be all lowercase letters')\n\n if check_exists and not user_exists(username):\n raise ValueError('Username does not exist')", "def clean_nickname(self):\n nickname = self.cleaned_data[\"nickname\"]\n if get_user_model().objects.filter(username=nickname).count():\n raise ValidationError(\"This login already exists.\")\n return nickname", "def request_and_validate_user_name_input(self) -> str:\n user_name_valid = False\n user_name = \"\"\n while not user_name_valid:\n user_name = input()\n user_name_valid = self.validate_user_name(user_name)\n if not user_name_valid:\n self.view_printer.print_username_invalid()\n return user_name", "def existingusercheck(userdata):\n\t\tfor line in 
userdata:\n\t\t\tif (len(line) > 1 and line.split()[0] == username):\n\t\t\t\tif (len(line.split()) == 3):\n\t\t\t\t\treturn line.split()[0], line.split()[1], line.split()[2]\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Invalid userdata: Userdata entry format is invalid. Cannot retrieve userdata.\\n\")\n\t\t\t\t\treturn \"NULL\", \"\", \"\"\n\t\treturn \"\", \"\", \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create_batches loader = DataLoader(dataset=self, batch_size=batch_size, shuffle=shuffle, collate_fn=self.collate_fn(device), pin_memory=False)
def create_batches(self, batch_size=128, shuffle=True):
    loader = DataLoader(dataset=self, batch_size=batch_size, shuffle=shuffle, collate_fn=self.collate_fn)
    return loader
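A usage sketch for the method above, before the non-matching loader snippets that follow. It assumes the surrounding class is a torch Dataset whose collate_fn returns (inputs, targets) pairs; the dataset instance and tensor shapes are hypothetical.

# dataset is an instance of the Dataset subclass that defines create_batches().
loader = dataset.create_batches(batch_size=64, shuffle=True)
for inputs, targets in loader:
    # One optimisation step per mini-batch would go here.
    print(inputs.shape, targets.shape)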
[ "def create_loaders(self):\n self.spam_data.text_to_tensors()\n print('creating dataloaders')\n train_data = TensorDataset(self.spam_data.train_inputs, \n self.spam_data.train_masks, \n self.spam_data.train_labels)\n train_sampler = RandomSampler(train_data)\n self.train_dataloader = DataLoader(train_data, \n sampler=train_sampler, \n batch_size=self.batch_size)\n\n validation_data = TensorDataset(self.spam_data.validation_inputs, \n self.spam_data.validation_masks, \n self.spam_data.validation_labels)\n validation_sampler = SequentialSampler(validation_data)\n self.validation_dataloader = DataLoader(validation_data, \n sampler=validation_sampler, \n batch_size=self.batch_size)\n \n test_data = TensorDataset(self.spam_data.test_inputs, \n self.spam_data.test_masks, \n self.spam_data.test_labels)\n test_sampler = SequentialSampler(test_data)\n self.test_dataloader = DataLoader(test_data, \n sampler=test_sampler, \n batch_size=self.batch_size)\n print('finished creating dataloaders')", "def _make_torch_data_loaders(opt, actions):\n train_dataset = Human36M(actions=actions, data_path=opt.data_dir,\n orthogonal_data_augmentation_prob=opt.orthogonal_data_augmentation_prob,\n z_rotations_only=opt.z_rotations_only, dataset_normalization=opt.dataset_normalization,\n flip_prob=opt.flip_prob, drop_joint_prob=opt.drop_joint_prob)\n test_dataset = Human36M(actions=actions, data_path=opt.data_dir,\n dataset_normalization=opt.dataset_normalization, is_train=False)\n\n if opt.use_horovod:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=hvd.size(),\n rank=hvd.rank())\n train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=opt.train_batch,\n sampler=train_sampler, # shuffle=True,#sampler=train_sampler,\n num_workers=args.workers,\n pin_memory=True)\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset, num_replicas=hvd.size(),\n rank=hvd.rank())\n test_loader = DataLoader(\n dataset=test_dataset,\n batch_size=opt.train_batch,\n sampler=test_sampler, # shuffle=True,#sampler=train_sampler,\n num_workers=args.workers,\n pin_memory=True)\n else:\n train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=opt.train_batch_size,\n shuffle=True,\n num_workers=opt.workers,\n pin_memory=True)\n test_loader = DataLoader(\n dataset=test_dataset,\n batch_size=opt.test_batch_size,\n shuffle=False,\n num_workers=opt.workers,\n pin_memory=True)\n return train_dataset, train_loader, test_loader", "def data_loader(self, reshape_size, checkpoint):\n self.reshape_size = reshape_size\n logging.info(\"\\nStarting to calculate the statistics...\")\n self.calculate_statistics(checkpoint)\n logging.info(\"Calculating the statistics is finished \\n\")\n \n\n\n self.train_dataset = Dataset_Generator( \n self.df , \n self.existing_channels , \n [\"train\"] , \n self.scaling_factor,\n self.reshape_size , \n self.data_map, \n self.statistics,\n self.augmentation )\n\n self.trainloader = DataLoader(self.train_dataset, \n batch_size=self.batch_size, \\\n shuffle=True, \n num_workers=self.num_workers)\n \n self.validation_dataset = Dataset_Generator( \n self.df , \n self.existing_channels , \n [\"validation\", \"test\"] , \n self.scaling_factor,\n self.reshape_size , \n self.data_map, \n self.statistics,\n [] )\n \n self.validationloader = DataLoader(self.validation_dataset, \n batch_size= self.batch_size, \\\n shuffle=False, \n num_workers=self.num_workers)", "def _initialize_dataloader(\n self, X_train: np.ndarray, y_train: np.ndarray, batch_size: 
int\n ):\n train_set = SimpleDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))\n self.train_loader = DataLoader(train_set, batch_size, shuffle=True)", "def create_taskloader(dataset, args, epoch=-1):\n task_ds = initialize_taskdataset(\n dataset,\n args.nways,\n args.kshots,\n args.kquery,\n args.epoch_steps * args.gradient_accumulation_steps * args.batch_size,\n )\n\n dl = initialize_taskloader(\n task_ds,\n num_workers=args.num_workers,\n batch_size=args.batch_size,\n deterministic=args.deterministic,\n distributed=args.distributed,\n seed=args.seed,\n epoch=epoch\n )\n return dl", "def _create_dataloaders(config, dataset_class, tf1, tf2, partitions, target_transform=None, shuffle=False):\r\n train_imgs_list = []\r\n #original dataloader generation\r\n for train_partition in partitions:\r\n #specific case: STL10 has \"split\" argument instead of \"train\"\r\n if \"STL10\" == config.dataset:\r\n train_imgs_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf1,\r\n split=train_partition,\r\n target_transform=target_transform)\r\n else:\r\n train_imgs_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf1,\r\n train=train_partition,\r\n target_transform=target_transform)\r\n\r\n train_imgs_list.append(train_imgs_curr)\r\n\r\n train_imgs = ConcatDataset(train_imgs_list)\r\n train_dataloader = torch.utils.data.DataLoader(train_imgs, batch_size=config.iic_dataloader_bs, shuffle=shuffle, num_workers=0, drop_last=False)\r\n\r\n if not shuffle:\r\n assert (isinstance(train_dataloader.sampler, torch.utils.data.sampler.SequentialSampler))\r\n dataloaders = [train_dataloader]\r\n\r\n #a number of augmented dataloader are generated\r\n for d_i in range(config.num_dataloaders):\r\n train_tf_imgs_list = []\r\n for train_partition in partitions:\r\n if \"STL10\" == config.dataset:\r\n train_imgs_tf_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf2, # random per call\r\n split=train_partition,\r\n target_transform=target_transform)\r\n else:\r\n train_imgs_tf_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf2,\r\n train=train_partition,\r\n target_transform=target_transform)\r\n train_tf_imgs_list.append(train_imgs_tf_curr)\r\n train_imgs_tf = ConcatDataset(train_tf_imgs_list)\r\n train_tf_dataloader = torch.utils.data.DataLoader(train_imgs_tf, batch_size=config.iic_dataloader_bs, shuffle=shuffle, num_workers=0, drop_last=False)\r\n\r\n if not shuffle:\r\n assert (isinstance(train_tf_dataloader.sampler, torch.utils.data.sampler.SequentialSampler))\r\n assert (len(train_dataloader) == len(train_tf_dataloader))\r\n dataloaders.append(train_tf_dataloader)\r\n\r\n num_train_batches = len(dataloaders[0]) #Number of batches of first dataloader (and also others)\r\n return dataloaders", "def data_loaders(self, batch_size, split=(0.85, 0.10)):\n assert sum(split) < 1\n\n dataset = self.tensor_dataset\n num_examples = dataset.data_tensor.size()[0]\n a, b = split\n train_dataset = TensorDataset(*dataset[: int(a * num_examples)])\n val_dataset = TensorDataset(*dataset[int(a * num_examples):\n int((a + b) * num_examples)])\n eval_dataset = TensorDataset(*dataset[int((a + b) * num_examples):])\n\n train_dl = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=False,\n drop_last=True,\n )\n\n val_dl = DataLoader(\n val_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,\n pin_memory=False,\n drop_last=True,\n )\n\n eval_dl = DataLoader(\n eval_dataset,\n 
batch_size=batch_size,\n shuffle=False,\n num_workers=0,\n pin_memory=False,\n drop_last=True,\n )\n return train_dl, val_dl, eval_dl", "def test_dataloader(self):\n return DataLoader(\n self.test_set,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=int(os.getenv(\"WORKERS\")),\n pin_memory=True,\n drop_last=True,\n )", "def _init_unlabelled_data_generator(self):\n if self.conf.l_mix == 0:\n return\n\n log.info('Initialising labelled datagen. Loading %s data' % self.conf.dataset_name)\n self.data_unlabelled = \\\n self.loader.load_labelled_data(self.conf.split, 'training',\n modality=self.conf.modality,\n downsample=self.conf.image_downsample)\n\n self.data_unlabelled.sample_per_volume(-1, self.conf.pctg_per_volume, seed=self.conf.seed)\n\n self.data_unlabelled.crop(self.conf.input_shape[:2]) # crop data to input shape: useful in transfer learning\n self.conf.data_len = self.data_unlabelled.size()\n\n datagen_dict1 = self.get_datagen_params()\n datagen_dict2 = self.get_datagen_params()\n datagen_dict3 = self.get_datagen_params()\n img_gen = ImageDataGenerator(**datagen_dict1).flow(self.data_unlabelled.images, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n anato_msk_gen = ImageDataGenerator(**datagen_dict2).flow(self.data_unlabelled.anato_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n patho_msk_gen = ImageDataGenerator(**datagen_dict3).flow(self.data_unlabelled.patho_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n scn_gen = utils.data_utils.generator(self.conf.batch_size, self.conf.seed, 'no_overflow', self.data_unlabelled.scanner)\n return itertools.zip_longest(img_gen, anato_msk_gen, patho_msk_gen, scn_gen)", "def __call__(self) -> DataLoader:\n datasetIndex = next(self.iter_datasets)\n dataset, start_index = datasetIndex\n\n end_task_index = start_index + self.task_samples\n end_task_index = min(end_task_index, len(dataset))\n #task_dataset = subset(dataset, range(start_index, end_task_index))\n\n task_dataset = subsample_dataset(dataset, self.task_samples, dict(enumerate(self.station_settings.weigh_classes)), random_shuffle=True)\n task_loader = self.task_loader(task_dataset)\n\n datasetIndex[1] = len(dataset) % end_task_index\n return task_loader", "def _init_labelled_data_generator(self):\n if self.conf.l_mix == 0:\n return\n\n log.info('Initialising labelled datagen. 
Loading %s data' % self.conf.dataset_name)\n self.data_labelled = \\\n self.loader.load_labelled_data(self.conf.split, 'training',\n modality=self.conf.modality,\n downsample=self.conf.image_downsample)\n # harric added modality and segmentation_option auguments\n self.data_labelled.sample_per_volume(-1,self.conf.pctg_per_volume, seed=self.conf.seed)\n self.data_labelled.sample_by_volume(int(self.conf.l_mix * self.data_labelled.num_volumes), seed=self.conf.seed)\n\n self.data_labelled.crop(self.conf.input_shape[:2]) # crop data to input shape: useful in transfer learning\n # self.conf.data_len = self.data.size()\n\n datagen_dict1 = self.get_datagen_params()\n datagen_dict2 = self.get_datagen_params()\n datagen_dict3 = self.get_datagen_params()\n img_gen = ImageDataGenerator(**datagen_dict1).flow(self.data_labelled.images, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n anato_msk_gen = ImageDataGenerator(**datagen_dict2).flow(self.data_labelled.anato_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n patho_msk_gen = ImageDataGenerator(**datagen_dict3).flow(self.data_labelled.patho_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n scn_gen = utils.data_utils.generator(self.conf.batch_size, self.conf.seed, 'no_overflow', self.data_labelled.scanner)\n return itertools.zip_longest(img_gen, anato_msk_gen, patho_msk_gen, scn_gen)", "def get_DataLoader():\n img_data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomCrop((cfg.DATA_SET.H_IMG, cfg.DATA_SET.W_IMG), pad_if_needed=True),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'val': transforms.Compose([\n transforms.CenterCrop((cfg.DATA_SET.H_IMG, cfg.DATA_SET.W_IMG)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n }\n\n\n train_img = datasets.ImageFolder(cfg.DATA_LOADER.TRAIN_IMG_FOLDER, img_data_transforms['train'])\n train_img_loader = torch.utils.data.DataLoader(train_img, \n batch_size=cfg.TRAIN.BATCH_SIZE, \n shuffle=True,\n num_workers=cfg.DATA_LOADER.NUM_THREADS)\n\n val_img = datasets.ImageFolder(cfg.DATA_LOADER.VAL_IMG_FOLDER, img_data_transforms['val'])\n val_img_loader = torch.utils.data.DataLoader(val_img, \n batch_size=cfg.TRAIN.BATCH_SIZE,\n shuffle=False, \n num_workers=cfg.DATA_LOADER.NUM_THREADS)\n\n return train_img_loader, val_img_loader", "def create_data_loaders(self, symbol, start_date, end_date, seq_len, batch_size):\n # Save the parameters to use in other functions\n self.start_date = start_date\n self.end_date = end_date\n self.symbol = symbol\n\n # Dataloaders\n train_data = StockData(seq_len, \"train\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.train_loader = data.DataLoader(train_data, batch_size=batch_size, shuffle=False)\n val_data = StockData(seq_len, \"val\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.val_loader = data.DataLoader(val_data, batch_size=batch_size, shuffle=False)\n test_data = StockData(seq_len, \"test\", symbol=symbol, start_date = start_date, end_date= end_date)\n self.test_loader = data.DataLoader(test_data, batch_size=batch_size, shuffle=False)\n\n # We will use this scaler to inverse scale of model outputs.\n self.scaler = train_data.scaler", "def run(self):\n # Tokenize the data.\n tokenized_sequence = self.tokenizer.run()\n # Split the data into batches.\n batches = self.batch_sequence(tokenized_sequence, self.data_config[\"batch_size\"],\n self.data_config[\"sequence_bucketing\"])\n 
random.shuffle(batches)\n # Compute the start index for validation dataset.\n split_index = int(len(batches) * self.data_config[\"train_val_proportion\"])\n # Wrap pytorch DataLoaders around the train batches.\n train_loader = self._init_loader(batches[:-split_index])\n # Wrap pytorch DataLoaders around the validation batches.\n validation_loader = self._init_loader(batches[-split_index:])\n return train_loader, validation_loader", "def build_detection_train_loader(cfg):\n\n # CSD: check config is supported\n assert cfg.DATALOADER.SAMPLER_TRAIN == \"TrainingSampler\", \"Unsupported training sampler: {}\".format(\n cfg.DATALOADER.SAMPLER_TRAIN\n )\n\n # Original code\n dataset = get_detection_dataset_dicts(\n cfg.DATASETS.TRAIN,\n filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,\n min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0,\n proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,\n )\n\n # CSD: subsample the dataset if needed\n dataset = check_subsample_dataset(dataset, cfg)\n\n if comm.is_main_process(): # Log counts\n logger = setup_logger(name=__name__)\n logger.debug(\"Number of images in the dataset: {}\".format(len(dataset)))\n _log_api_usage(\"dataset.\" + cfg.DATASETS.TRAIN[0])\n\n # Original code\n mapper = DatasetMapper(cfg, True)\n\n sampler = TrainingSampler(len(dataset))\n\n dataset = DatasetFromList(dataset, copy=False)\n dataset = MapDataset(dataset, mapper)\n sampler = TrainingSampler(len(dataset))\n assert isinstance(sampler, torch.utils.data.sampler.Sampler)\n\n return build_batch_data_loader(\n dataset,\n sampler,\n cfg.SOLVER.IMS_PER_BATCH,\n aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,\n num_workers=cfg.DATALOADER.NUM_WORKERS,\n )", "def _construct_loader(self):\n # Get list of paths\n os.makedirs(self.path_to_data_dir, exist_ok=True)\n path_to_file = os.path.join(\n self.path_to_data_dir, f\"{self.ds_name}_{self.mode}.txt\"\n )\n if not os.path.exists(path_to_file) and self.ds_name != 'audioset':\n files = list(sorted(glob.glob(os.path.join(self.data_prefix, '*', '*')))) \n with open(path_to_file, 'w') as f:\n for item in files:\n f.write(\"%s\\n\" % item)\n\n self._path_to_videos = []\n self._labels = []\n self._spatial_temporal_idx = []\n self._vid_indices = []\n with open(path_to_file, \"r\") as f:\n for clip_idx, path in enumerate(f.read().splitlines()):\n for idx in range(self._num_clips):\n self._path_to_videos.append(\n os.path.join(self.data_prefix, path)\n )\n if self.ds_name != 'audioset':\n class_name = path.split('/')[-2]\n label = self.class_to_idx[class_name]\n self._labels.append(int(label))\n self._spatial_temporal_idx.append(idx)\n self._vid_indices.append(clip_idx)\n self._video_meta[clip_idx * self._num_clips + idx] = {}\n assert (\n len(self._path_to_videos) > 0\n ), \"Failed to load {} split {} from {}\".format(\n self.ds_name, self._split_idx, path_to_file\n )\n print(\n \"Constructing {} dataloader (size: {}) from {}\".format(\n self.ds_name, len(self._path_to_videos), path_to_file\n )\n )\n\n # Create / Load valid indices (has audio)\n vid_valid_file = f'{self.path_to_data_dir}/{self.ds_name}_valid.pkl'\n if os.path.exists(vid_valid_file):\n with open(vid_valid_file, 'rb') as handle:\n self.valid_indices = pickle.load(handle)\n else:\n self.valid_indices = filter_videos(self._path_to_videos)\n with open(vid_valid_file, 'wb') as handle:\n pickle.dump(\n self.valid_indices, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL\n )\n if 
self.num_data_samples is not None:\n self.valid_indices = self.valid_indices[:self.num_data_samples]\n print(f\"Total number of videos: {len(self._path_to_videos)}, Valid videos: {len(self.valid_indices)}\", flush=True)\n\n # Make lists a Manager objects\n #self._path_to_videos = self.manager.list(self._path_to_videos)\n self.valid_indices = list(self.valid_indices)", "def create_data_batches(X, y=None, batch_size=BATCH_SIZE, valid_data=False, test_data=False):\n\n # if the data is test datasrt, wr probably dont have labels\n if test_data:\n print('Creating the data batches...')\n data = tf.data.Dataset.from_tensor_slices((tf.constant(X)))\n data_batch = data.map(process_image).batch(BATCH_SIZE)\n return data_batch\n # if the data is valid dataset, we don't need to shuffle it\n elif valid_data:\n print('Creating validation data batches...')\n data = tf.data.Dataset.from_tensor_slices((tf.constant(X), # filepaths\n tf.constant(y))) # labels\n data_batch = data.map(get_image_label).batch(BATCH_SIZE)\n return data_batch\n\n else:\n print('Creating training data batches...')\n # Turn filepaths and labels into Tensors\n data = tf.data.Dataset.from_tensor_slices((tf.constant(X),\n tf.constant(y)))\n # shuffling pathnames and labels before mapping image processor funtion is faster than shuffling images\n data = data.shuffle(buffer_size=len(X))\n # create image label tuples this alos turn the image path into preprocesse image\n data = data.map(get_image_label)\n\n # turn the training data into batches\n data_batch = data.batch(BATCH_SIZE)\n\n return data_batch", "def prepare_data(self):\n self.tokenizer = custom_tokenizer_from_pretrained(\n self.tokenizer_name_or_path, self.cache_dir\n )\n try:\n self.train_examples = ExamplesBuilder(\n self.data_dir,\n Split.train,\n delimiter=self.delimiter,\n ).examples\n self.val_examples = ExamplesBuilder(\n self.data_dir,\n Split.dev,\n delimiter=self.delimiter,\n ).examples\n self.test_examples = ExamplesBuilder(\n self.data_dir,\n Split.test,\n delimiter=self.delimiter,\n ).examples\n\n if self.num_samples > 0:\n self.train_examples = self.train_examples[: self.num_samples]\n self.val_examples = self.val_examples[: self.num_samples]\n self.test_examples = self.test_examples[: self.num_samples]\n\n # create label vocabulary from dataset\n all_examples = self.train_examples + self.val_examples + self.test_examples\n all_labels = sorted(\n {\n tag.label\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n self.label_list = [BIO.O.value] + sorted(all_labels)\n label_types = sorted(\n {\n tag.tagtype.value\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n with open(self.labels_path, \"w\") as fp:\n for l in label_types:\n fp.write(l)\n fp.write(\"\\n\")\n\n self.label_to_id = {l: i for i, l in enumerate(self.label_list)}\n self.id_to_label = self.label_list\n\n start = time.time()\n self.train_dataset = self.create_dataset(\n self.train_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(train): {read_time}\")\n\n start = time.time()\n self.val_dataset = self.create_dataset(\n self.val_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(val): {read_time}\")\n\n start = time.time()\n self.test_dataset = self.create_dataset(\n self.test_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(test): 
{read_time}\")\n\n self.dataset_size = len(self.train_dataset)\n\n logger.info(self.val_examples[:3])\n logger.info(self.val_dataset[:3])\n\n except NoLocalFileError as e:\n logger.error(e)\n exit(1)", "def get_loader(args, batch_size, vocab, shuffle, num_workers, use_video=False):\n\n if use_video:\n tasty_videos = TastyVideoDataset()\n data_loader = torch.utils.data.DataLoader(dataset=tasty_videos,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n else:\n recipe1m = Recipe1MDataset(args, vocab)\n data_loader = torch.utils.data.DataLoader(dataset=recipe1m,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is a fix for correcting the RETN address based on IDA Pro's function "length". This is done by rewinding 3 bytes from the current retnAddress to find 0xC2, 0xC3, 0xEB, 0xE9. It's nowhere near perfect, but most addresses are corrected. A new copy of the addresses will be saved, and a new function list (same type) is returned. This function should be called from self.run()
def __correctIDAProRETNs(self, dbg, functions):
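Only the signature of this method appears in the record above. Below is a minimal illustrative sketch of the correction technique the query describes, not the dataset's reference implementation: it assumes each entry in functions is a list whose first element is the start address (as run() later hooks addr[0]) and whose second element is the IDA-reported RETN address, and it uses pydbg's read_process_memory call to inspect the bytes preceding that address. The helper name correct_retn_addresses is hypothetical.

def correct_retn_addresses(dbg, functions):
    # Sketch under an assumed entry layout [startAddress, retnAddress]; dbg is an attached pydbg instance.
    corrected = []
    for entry in functions:
        start, retn = entry[0], entry[1]   # assumed positions of the start and RETN addresses
        new_retn = retn
        # rewind up to 3 bytes, looking for RETN imm16 (0xC2), RETN (0xC3), JMP rel8 (0xEB) or JMP rel32 (0xE9)
        for back in range(1, 4):
            opcode = dbg.read_process_memory(retn - back, 1)
            if opcode in ("\xC2", "\xC3", "\xEB", "\xE9"):
                new_retn = retn - back
                break
        corrected.append([start, new_retn])
    return corrected    # new copy; the original list is left untouched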
[ "def correctFips(stateCodes, FIPS):\n return [str(stateCode) + str(fcode).zfill(3) for stateCode,fcode in zip(stateCodes,FIPS)]", "def _check_lfn2pfn(self):\n for lfn in SE_PROBES_BYTYPE[self.rsetype]:\n\n # this is what rucio does\n pfn = self.proto['scheme'] + '://' + self.proto['hostname'] + \\\n ':' + str(self.proto['port'])\n\n if 'web_service_path' in self.proto['extended_attributes']:\n pfn = pfn + self.proto['extended_attributes']['web_service_path']\n\n pfn = pfn + '/' + cmstfc('cms', lfn, None, None, self.proto)\n\n # this should match dataservice pfn, modulo some normalization\n # (e.g.: adding the port number)\n pfn_datasvc = []\n\n wo_port = self.pcli.lfn2pfn(\n pnn=self.pnn, lfn=lfn, tfc=self.tfc,\n protocol=self.proto['extended_attributes']['tfc_proto'])\n wo_port = re.sub('/+', '/', wo_port)\n w_port = wo_port.replace(\n self.proto['hostname'],\n self.proto['hostname'] + ':' + str(self.proto['port'])\n )\n\n # Get rid of ALL multiple slashes, including separating protocol from host (valid for comparison only)\n pfn_datasvc.append(wo_port)\n pfn_datasvc.append(w_port)\n pfn = re.sub('/+', '/', pfn)\n\n if pfn not in pfn_datasvc:\n raise Exception(\"rucio and datasvc lfn2pfn mismatch, rucio: %s ; datasvc: %s\" %\n (pfn, pfn_datasvc))\n\n logging.debug(\"checking lfn2pfn ok %s\", pfn)", "def guess_relocations_mask(func, sig_length):\n\n\tmask = [False] * sig_length\n\ti = 0\n\twhile i < sig_length:\n\t\tbb = func.get_basic_block_at(func.start + i)\n\t\tif not bb: # not in a basicblock; wildcard\n\t\t\tmask[i] = False\n\t\t\ti += 1\n\t\t\tcontinue\n\n\t\tbb._buildStartCache()\n\t\tif not bb._instLengths:\n\t\t\ti += 1\n\t\t\tcontinue\n\t\tfor insn_len in bb._instLengths:\n\t\t\tllil = func.get_low_level_il_at(func.start + i, bb.arch)\n\t\t\tinsn_mask = not is_llil_relocatable(llil)\n\t\t\t# if not insn_mask:\n\t\t\t# func.set_auto_instr_highlight(func.start + i, HighlightStandardColor.BlueHighlightColor)\n\t\t\tmask[i:min(i + insn_len, sig_length)] = [insn_mask] * min(insn_len, sig_length - i)\n\t\t\ti += insn_len\n\t\t\tif i >= sig_length: break\n\treturn mask", "def make_functions(self):\n\n # TODO: Is it required that PLT stubs are always aligned by 16? 
If so, on what architectures and platforms is it\n # TODO: enforced?\n\n tmp_functions = self.kb.functions.copy()\n\n for function in tmp_functions.values():\n function.mark_nonreturning_calls_endpoints()\n\n # Clear old functions dict\n self.kb.functions.clear()\n\n blockaddr_to_function = {}\n traversed_cfg_nodes = set()\n\n function_nodes = set()\n\n # Find nodes for beginnings of all functions\n for _, dst, data in self.graph.edges(data=True):\n jumpkind = data.get(\"jumpkind\", \"\")\n if jumpkind == \"Ijk_Call\" or jumpkind.startswith(\"Ijk_Sys\"):\n function_nodes.add(dst)\n\n entry_node = self.model.get_any_node(self._binary.entry)\n if entry_node is not None:\n function_nodes.add(entry_node)\n\n # aggressively remove and merge functions\n # For any function, if there is a call to it, it won't be removed\n called_function_addrs = {n.addr for n in function_nodes}\n # Any function addresses that appear as symbols won't be removed\n predetermined_function_addrs = called_function_addrs | self._function_addresses_from_symbols\n\n removed_functions_a = self._process_irrational_functions(\n tmp_functions, predetermined_function_addrs, blockaddr_to_function\n )\n removed_functions_b, adjusted_cfgnodes = self._process_irrational_function_starts(\n tmp_functions, predetermined_function_addrs, blockaddr_to_function\n )\n self._process_jump_table_targeted_functions(\n tmp_functions,\n predetermined_function_addrs,\n blockaddr_to_function,\n )\n removed_functions = removed_functions_a | removed_functions_b\n\n # Remove all nodes that are adjusted\n function_nodes.difference_update(adjusted_cfgnodes)\n for n in self.graph.nodes():\n if n.addr in tmp_functions or n.addr in removed_functions:\n function_nodes.add(n)\n\n # traverse the graph starting from each node, not following call edges\n # it's important that we traverse all functions in order so that we have a greater chance to come across\n # rational functions before its irrational counterparts (e.g. 
due to failed jump table resolution)\n\n min_stage_2_progress = 50.0\n max_stage_2_progress = 90.0\n nodes_count = len(function_nodes)\n for i, fn in enumerate(sorted(function_nodes, key=lambda n: n.addr)):\n if self._low_priority:\n self._release_gil(i, 800, 0.000001)\n\n if self._show_progressbar or self._progress_callback:\n progress = min_stage_2_progress + (max_stage_2_progress - min_stage_2_progress) * (\n i * 1.0 / nodes_count\n )\n self._update_progress(progress)\n\n self._graph_bfs_custom(\n self.graph,\n [fn],\n self._graph_traversal_handler,\n blockaddr_to_function,\n tmp_functions,\n traversed_cfg_nodes,\n )\n\n # Don't forget those small function chunks that are not called by anything.\n # There might be references to them from data, or simply references that we cannot find via static analysis\n\n secondary_function_nodes = set()\n # add all function chunks (\"functions\" that are not called from anywhere)\n for func_addr in tmp_functions:\n node = self.model.get_any_node(func_addr)\n if node is None:\n continue\n if node.addr not in blockaddr_to_function:\n secondary_function_nodes.add(node)\n\n missing_cfg_nodes = set(self.graph.nodes()) - traversed_cfg_nodes\n missing_cfg_nodes = {node for node in missing_cfg_nodes if node.function_address is not None}\n if missing_cfg_nodes:\n l.debug(\"%d CFGNodes are missing in the first traversal.\", len(missing_cfg_nodes))\n secondary_function_nodes |= missing_cfg_nodes\n\n min_stage_3_progress = 90.0\n max_stage_3_progress = 99.9\n\n nodes_count = len(secondary_function_nodes)\n for i, fn in enumerate(sorted(secondary_function_nodes, key=lambda n: n.addr)):\n if self._show_progressbar or self._progress_callback:\n progress = min_stage_3_progress + (max_stage_3_progress - min_stage_3_progress) * (\n i * 1.0 / nodes_count\n )\n self._update_progress(progress)\n\n self._graph_bfs_custom(\n self.graph, [fn], self._graph_traversal_handler, blockaddr_to_function, tmp_functions\n )\n\n to_remove = set()\n\n # Remove all stubs after PLT entries\n if not is_arm_arch(self.project.arch):\n to_remove |= self._remove_dummy_plt_stubs(self.kb.functions)\n\n # remove empty functions\n for func in self.kb.functions.values():\n if func.startpoint is None:\n to_remove.add(func.addr)\n\n for addr in to_remove:\n del self.kb.functions[addr]\n\n # Update CFGNode.function_address\n for node in self._nodes.values():\n if node.addr in blockaddr_to_function:\n node.function_address = blockaddr_to_function[node.addr].addr\n\n # Update function.info\n for func in self.kb.functions.values():\n if func.addr in tmp_functions:\n func.info = tmp_functions[func.addr].info", "def fix_address_decoding(decoded, types):\n rval = []\n for val, type in zip(decoded, types):\n if type == 'address':\n rval.append('0x{}'.format(val.decode('ascii')))\n elif type == 'address[]':\n rval.append(['0x{}'.format(v.decode('ascii')) for v in val])\n elif type == 'string':\n rval.append(val.rstrip(b'\\x00').decode('utf-8'))\n else:\n rval.append(val)\n return rval", "def getFunctionsToRename(self):\n functions_to_rename = []\n for function_address_to_tag in self.last_scan_result.keys():\n new_function_name = self.last_scan_result[function_address_to_tag].function_name\n # has the function still a dummy name?\n if self.ida_proxy.GetFlags(function_address_to_tag) & self.ida_proxy.FF_LABL > 0:\n tags_for_function = self.getTagsForFunctionAddress(function_address_to_tag)\n for tag in sorted(tags_for_function, reverse=True):\n if tag not in new_function_name:\n new_function_name = tag + 
self.renaming_seperator + new_function_name\n functions_to_rename.append({\"old_function_name\": \\\n self.last_scan_result[function_address_to_tag].function_name, \"new_function_name\": \\\n new_function_name, \"function_address\": function_address_to_tag})\n return functions_to_rename", "def _load_func_addrs_from_symbols(self):\n\n return {sym.rebased_addr for sym in self._binary.symbols if sym.is_function}", "def get_function_instructions(self, _ea):\n\t\tinstr = []\n\t\tif (_ea != BADADDR):\n\t\t\tinstr_matrix = self.get_function_disasm(_ea)\n\t\t\tfor line in instr_matrix:\n\t\t\t\tinstr.append(line[0])\n\t\treturn instr", "def get_disasm_all_functions_from(self, _funcea):\n\t\tfdisasm = {}\n\t\tif (_funcea != BADADDR):\n\t\t\tfroot_disasm = self.get_disasm_function_line(_funcea)\n\t\t\tfroot_name = GetFunctionName(_funcea)\n\t\t\tfdisasm[froot_name] = froot_disasm\n\t\t\tfcalled = self.get_all_sub_functions_called(_funcea, _visited=[])\n\t\t\tprint(fcalled)\n\t\t\tif (len(fcalled) > 0):\n\t\t\t\tprint(\"[*] Retrieving assembly from {:d} function(s).\".format(len(fcalled)))\n\t\t\t\tfor finfo in fcalled:\n\t\t\t\t\tfea = finfo[1]\n\t\t\t\t\tfname = finfo[2]\n\t\t\t\t\tfcode = self.get_disasm_function_line(fea)\n\t\t\t\t\tfdisasm[fname] = fcode\n\t\treturn fdisasm", "def test_ipv4address_reverse_pointer(self):\n n = 10**5\n addr = ip.IPv4Address('1.2.3.4')\n time1, result1 = timefn(n, lambda: addr.reverse_pointer)\n eaddr = eip.IPv4Address('1.2.3.4')\n time2, result2 = timefn(n, lambda: eaddr.reverse_pointer)\n results = (time1, result1), (time2, result2)\n self.report_4a.report(fn_name(), n, results, addr)", "def func_addresses(binary):\n pattern = (r'^\\s*0*([{0}]+)\\s+(?:g|l)\\s+F [.]text\\s+([{0}]{{8}})\\s+(.*)\\s*$')\\\n .format(string.hexdigits)\n return sorted(_symtab_extract(binary, pattern),\n key=lambda tup: int(tup[0],16) )", "def get_raw_func(self, name):\n if name not in self.funcs:\n return []\n sym = self.get_symbol(name)\n addr = sym.rebased_addr\n end_addr = addr + sym.size\n self.log.debug('extracting raw function %s at %#x', name, addr)\n\n body = []\n for i in range(addr, end_addr, 4):\n instr = self.get_instr(i)\n if instr is None:\n continue\n body.append(instr)\n return body", "def test_ipv6address_reverse_pointer(self):\n n = 10**4\n addr = ip.IPv6Address('1:2:3:4:5:6::')\n time1, result1 = timefn(n, lambda: addr.reverse_pointer)\n eaddr = eip.IPv6Address('1:2:3:4:5:6::')\n time2, result2 = timefn(n, lambda: eaddr.reverse_pointer)\n results = (time1, result1), (time2, result2)\n self.report_6a.report(fn_name(), n, results, addr)", "def switch_to_address(self, ea):\n self.__ea = ea\n decompile_function_wrapper(cache_only=True, do_show=False)\n return", "def func_tail_removed(pfn, ea):\n\n # first we'll grab the addresses from our refs\n listable = internal.comment.contents.address(ea, target=interface.range.start(pfn))\n\n # these should already be sorted, so our first step is to filter out what\n # doesn't belong. in order to work around one of the issues posed in the\n # issue arizvisa/ida-minsc#61, we need to explicitly check that each item is\n # not None prior to their comparison against `pfn`. 
this is needed in order\n # to work around a null-pointer exception raised by SWIG when it calls the\n # area_t.__ne__ method to do the comparison.\n missing = [ item for item in listable if not idaapi.get_func(item) or idaapi.get_func(item) != pfn ]\n\n # if there was nothing found, then we can simply exit the hook early\n if not missing:\n return\n\n # now iterate through the min/max of the list as hopefully this is\n # our event.\n for ea in database.address.iterate(min(missing), max(missing)):\n for k in database.tag(ea):\n internal.comment.contents.dec(ea, k, target=interface.range.start(pfn))\n internal.comment.globals.inc(ea, k)\n logging.debug(u\"{:s}.func_tail_removed({:#x}, {:#x}) : Exchanging (increasing) reference count for global tag {!s} and (decreasing) reference count for contents tag {!s}.\".format(__name__, interface.range.start(pfn), ea, utils.string.repr(k), utils.string.repr(k)))\n continue\n return", "def fix_up(self):\n\n # get the offset to the fix up array\n offset = unpack(\"<H\", self._entry[4:6])[0]\n print (\"Offset to fix up array: %d\" % offset)\n\n # get the number of entries in the fix up array\n num = unpack(\"<H\", self._entry[6:8])[0]\n print (\"Number of entries in the fix up array: %d\" % num)\n\n # get the fixup signature\n signature = ''.join('{:02x}'.format(b) for b in reversed(self._entry[offset:offset + 2]))\n print (\"Fixup sig: 0x\" + signature)\n\n # read in the fixup array\n fixup_array = []\n for i in range(0, num - 1):\n fixup_array.append(self._entry[offset + 2 + i * 2: offset + 4 + i * 2])\n\n # overwrite proper values\n temp_entry = [] # cannot overwrite bytes without making a new array\n current_offset = 0\n\n for i in range(0, num - 1):\n sector_offset = 510 * (i + 1) + i * 2\n\n bytes = \"0x\" + ''.join('{:02x}'.format(b) for b in\n reversed(self._entry[sector_offset:sector_offset + 2]))\n print (\"Bytes %d/%d %s;\" % (sector_offset, sector_offset + 1, bytes), end=\" \")\n\n print (\"Overwriting 0x%s into bytes %d/%d\" %\n (''.join('{:02x}'.format(b) for b in reversed(fixup_array[i])),\n sector_offset, sector_offset + 1))\n\n # add sector up until last two bytes\n temp_entry.extend(self._entry[current_offset:sector_offset])\n\n # add fixup value\n temp_entry.extend(fixup_array[i])\n\n # replace value in the fixup array with the one on disk\n fixup_array[i] = self._entry[sector_offset:sector_offset + 2]\n\n # update offset\n current_offset = sector_offset + 2\n\n # create temp_entry as bytearray\n temp_entry = bytearray(temp_entry)\n self._entry = temp_entry # overwrite the bytes in memory\n\n print (\"\")", "def fixAstrometry(self,obs,skip):\n\n print \"Now correcting astrometric zeropoint...\"\n astrom=astrometer.gscMatchup(obs,skip)\n \n try:\n rval = astrom.findAstromCorrs()\n except astrometer.WebQueryError,err:\n warntxt = \"Caught a WebQueryError. 
Astrometric matchup not successful.\"\n print warntxt\n self.logfile.write(warntxt)\n self.logfile.write(str(err))\n self.errorList.append((self.modName,warntxt))\n self.errorList.append((self.modName,str(err)))\n raise astrometer.WebQueryError,err\n \n if not rval:\n print \"Astrometric matchup successful.\"\n self.logfile.write(\"Astrometric matchup successful.\")\n self.logfile.write(\"Applying corrections.\")\n #pdb.set_trace()\n astrom.applyCorrs()\n return", "def fix_tail_call_targets(bv, func):\n for block in func.basic_blocks:\n # This will return a list of all basic blocks starting at the same address\n # The same block appearing in different functions (after inlining)\n # will appear as multiple `BasicBlock`s\n all_blocks = bv.get_basic_blocks_at(block.start)\n\n # There should only be a single block found\n if len(all_blocks) > 1:\n log.debug('Block 0x%x exists in multiple functions, defining a new function here', block.start)\n\n # Define a function here and reanalyze\n # All blocks contained in this new function will not be picked up\n # in the remainder of this loop after analysis\n bv.add_function(block.start)\n bv.update_analysis_and_wait()", "def __mapping_entry_handler(vnic, ep_ip_prefixes):\n api.Logger.info(f\"Fixing local and remote mapping for {vnic}, ip prefixes {ep_ip_prefixes} \")\n ep_ips = set()\n for prefix in ep_ip_prefixes:\n ep_ips.add(__ip_from_prefix(prefix))\n api.Logger.info(f\"IP address set: {ep_ips}\")\n\n # Delete lmap entries\n for lmap in vnic.Children.copy():\n api.Logger.info(f\"Handling lmap {lmap} {lmap.IP}\")\n if lmap.IP in ep_ips:\n ep_ips.remove(lmap.IP)\n else:\n lmap.Delete()\n lmap.Destroy()\n # Delete rmap entries on other nodes\n for node in api.GetNaplesHostnames():\n if node != vnic.Node:\n node_subnet = subnet_client.GetSubnetObject(node, vnic.SUBNET.SubnetId)\n rmap = node_subnet.GetRemoteMappingObjectByIp(lmap.IP)\n assert(rmap)\n rmap.Delete()\n rmap.Destroy()\n\n # Add new lmap entries\n for ep_ip in ep_ips:\n lmap_spec = dict()\n lmap_spec['origin'] = 'discovered'\n lmap_spec['lipaddr'] = ep_ip\n vnic_spec = parser.Dict2Object({'lmap': [lmap_spec]})\n lmap_client.GenerateObjects(vnic.Node, vnic, vnic_spec)\n\n # Add rmap entries on other nodes\n for node in api.GetNaplesHostnames():\n if node != vnic.Node:\n mac = \"macaddr/%s\"%vnic.MACAddr.get()\n rmap_spec = dict()\n rmap_spec['rmacaddr'] = objects.TemplateFieldObject(mac)\n rmap_spec['ripaddr'] = ep_ip\n ipversion = utils.IP_VERSION_6 if lmap.AddrFamily == 'IPV6' else utils.IP_VERSION_4\n node_subnet = subnet_client.GetSubnetObject(node, vnic.SUBNET.SubnetId)\n rmap_client.GenerateObj(node, node_subnet, rmap_spec, ipversion)\n\n # Dump all local and remote mappings\n #__dump_client_dol_db([lmap_client, rmap_client])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will create breakpoints.txt for InMemoryFuzzer.py
def createBreakpoints(self):
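The record again contains only the signature. A hedged sketch of writing breakpoints.txt for InMemoryFuzzer.py follows; the self.functions attribute, the [startAddress, ...] entry layout and the one-hex-address-per-line file format are assumptions made for illustration, not details taken from the source.

def create_breakpoints(self):
    # Illustrative only: dumps one assumed hook address per line for InMemoryFuzzer.py to read back.
    out = open("breakpoints.txt", "w")
    try:
        for entry in self.functions:            # assumed attribute holding the corrected function list
            out.write("0x%08x\n" % entry[0])    # assumed format: one hexadecimal start address per line
    finally:
        out.close()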
[ "def create_fuzzer(out):\n with open(fuzzer_filename, 'r') as fuzzer_file:\n for line in fuzzer_file:\n out.write(line)", "def setUp(self):\n for patch_name in ['a', 'b', 'c']:\n open(os.path.join(self.STASH_PATH, patch_name), 'w').write(patch_name.upper())", "def load_breakpoints(args):\n break_dict = {}\n if args.breakpoints:\n with open(args.breakpoints, 'r') as in_handle:\n for line in in_handle:\n if line[0] != '#':\n line = line.strip().split('\\t')\n seq = line[0]\n coords = [int(coord)-1 for coord in line[1:]] # 0-based\n break_dict[seq] = coords\n return break_dict", "def create_file_with_xfails():\n with open(os.path.join(BASE_DIR, EXPECTED_FAILURES_FILE), \"wb\") as f:\n yaml.dump(EXPECTED_FAILURES, f, default_flow_style=False)\n\n return os.path.join(os.getcwd(), BASE_DIR, EXPECTED_FAILURES_FILE)", "def _generateDisassemblyFiles(targetFile, outFile, fileTestCases):\n try:\n for testCase in fileTestCases:\n # Check if disassembly file exists before running\n disassemblyFile = \"%s_%s.dyndis\" % (targetFile.replace(\".c\",\"\"), testCase[testCase.rfind(\"/\")+1:].replace(\".txt\",\"\"))\n if os.path.exists(disassemblyFile):\n prettyPrint(\"Disassembly file \\\"%s\\\" already exists. Skipping\" % disassemblyFile, \"warning\")\n continue\n # (2.b.i) Parse the KLEE test file and retrieve the list of arguments\n runArgs, inputFile = loadArgumentsFromKLEE(testCase)\n # (2.b.ii) Generate a GDB script to \"run\" with these two inputs\n generateGDBScript(outFile.replace(\".out\", \".txt\"), inputFile=testCase.replace(\".txt\",\".input\"))\n # (2.b.iii) Launch the GDB script\n prettyPrint(\"Launching the GDB script. Release the Kraken!!\")\n gdbOutput = subprocess.Popen([\"gdb\", \"-batch\", \"-x\", outFile.replace(\".out\",\".script\"), outFile], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]\n # Check that the output does not indicate erroneous runtime behavior\n if not checkTestCaseSuccess(gdbOutput):\n prettyPrint(\"The test case \\\"%s\\\" crashed the file \\\"%s\\\". Skipping\" % (testCase, targetFile), \"warning\")\n continue\n # (2.b.iv) Get the instruction trace of the process from \"gdb.txt\" and extract features from it\n if os.path.exists(outFile.replace(\".out\",\".txt\")):\n # Store the contents of \"gdb.txt\" as disassembly for further processing\n prettyPrint(\"Dumping dynamic disassembly to \\\"%s\\\"\" % disassemblyFile, \"debug\")\n gdbFile = open(disassemblyFile, \"w\")\n gdbFileContent = open(outFile.replace(\".out\",\".txt\")).read()\n if gdbFileContent.find(\"Segmentation fault\") != -1:\n prettyPrint(\"Test case \\\"%s\\\"crashed the file \\\"%s\\\". 
Skipping\" % (testCase, targetFile), \"warning\")\n continue\n # Otherwise write content to file\n gdbFile.write(gdbFileContent)\n gdbFile.close()\n # Also generate a label file for ease of retrieval\n labelFile = open(disassemblyFile.replace(\".dyndis\", \".label\"), \"w\")\n labelFile.write(\"%s\\n\" % loadLabelFromFile(targetFile.replace(\".c\",\".metadata\"))[0])\n labelFile.close()\n \n os.unlink(outFile.replace(\".out\",\".txt\")) # Remove the gdb logging file to avoid appending to it\n os.unlink(outFile.replace(\".out\",\".script\"))\n os.unlink(outFile)\n\n except Exception as e:\n prettyPrint(\"Error encountered in \\\"_generateDisassemblyFiles\\\": %s\" % e, \"error\")\n return False\n\n return True", "def generate_comment(file_name, function):\n infile = open(file_name, \"at\")\n string = \"/*\\n* Generate by Deepfuzzer\\n\"\n infile.write(string)\n string = \"* Target Function: \" + function.prototype + \"\\n\"\n infile.write(string)\n now = os.popen(\"date\").read().split('\\n')[0]\n string = \"* Time: \" + now+\"\\n*/\\n\\n\"\n infile.write(string)\n infile.close()", "def tasks():\n\n # Remove any existing codebreak text outputs\n if os.path.exists('./codebreak.txt'):\n os.remove('./codebreak.txt')\n\n # CODEBREAKER 1\n settings = {'code':'DMEXBMKYCVPNQBEDHXVPZGKMTFFBJRPJTLHLCHOTKOYXGGHZ',\n 'crib': 'SECRETS',\n 'rotors': 'Beta Gamma V',\n 'reflector': 'UNKNOWN',\n 'ring_settings': '04 02 14',\n 'initial_positions':'M J M',\n 'plugboard_pairs': 'KI XN FL'}\n e = CodeBreaker(settings)\n e.codebreak1_reflector()\n\n # CODEBREAKER 2\n settings = {'code':'CMFSUPKNCBMUYEQVVDYKLRQZTPUFHSWWAKTUGXMPAMYAFITXIJKMH',\n 'crib': 'UNIVERSITY',\n 'rotors': 'Beta I III',\n 'reflector': 'B',\n 'ring_settings': '23 02 10',\n 'initial_positions':'UNKNOWN',\n 'plugboard_pairs': 'VH PT ZG BJ EY FS'}\n e = CodeBreaker(settings)\n e.codebreak2_positions()\n\n # CODEBREAKER 3\n settings = {'code':'ABSKJAKKMRITTNYURBJFWQGRSGNNYJSDRYLAPQWIAGKJYEPCTAGDCTHLCDRZRFZHKNRSDLNPFPEBVESHPY',\n 'crib': 'THOUSANDS',\n 'rotors': 'UNKNOWN',\n 'reflector': 'UNKNOWN',\n 'ring_settings': 'UNKNOWN',\n 'initial_positions':'E M Y',\n 'plugboard_pairs': 'FH TS BE UQ KD AL'}\n e = CodeBreaker(settings)\n e.codebreak3_multi()\n\n # CODEBREAKER 4\n settings = {'code': 'SDNTVTPHRBNWTLMZTQKZGADDQYPFNHBPNHCQGBGMZPZLUAVGDQVYRBFYYEIXQWVTHXGNW',\n 'crib':'TUTOR',\n 'rotors':'V III IV',\n 'reflector':'A',\n 'ring_settings':'24 12 10',\n 'initial_positions':'S W U',\n 'plugboard_pairs': 'WP RJ A? VF I? HN CG BS'}\n e = CodeBreaker(settings)\n e.codebreak4_plugleads()\n\n # CODEBREAKER 5\n settings = {'code': 'HWREISXLGTTBYVXRCWWJAKZDTVZWKBDJPVQYNEQIOTIFX',\n 'crib':['FACEBOOK','INSTAGRAM','TWITTER','SNAPCHAT','YOUTUBE','REDDIT','LINKEDIN'],\n 'rotors':'V II IV',\n 'reflector':'A',\n 'ring_settings':'6 18 7',\n 'initial_positions':'A J L',\n 'plugboard_pairs': 'UG IE PO NX WT'}\n e = CodeBreaker(settings)\n e.codebreak5_rewiring()", "def create_test_files():\n\n test_sample_dir = SAMPLE_DIR # Directory where the checkpoints will be saved\n\n if test_sample_dir.exists() is False:\n test_sample_dir.mkdir(parents=True, exist_ok=True)\n\n # Create directories\n dirs = [\"apple\", \"banana\", \"coconut\"]\n for dir in dirs:\n dir = test_sample_dir / Path(dir)\n if dir.exists() is False:\n dir.mkdir(parents=True, exist_ok=True)\n log.info(\"%s created\" % (dir))\n else:\n log.info(\"%s found. 
Skip creation\" % (dir))\n\n # Create files\n files = [\"apple/apple.txt\", \"banana/banana.txt\", \"coconut/coconut.txt\"]\n text_body = [\"Apples Apples apples Apples are delicious. They are from New Hampshire.\",\n \"Bananas are good for breakfast. What's new? We can make the best Banana Sundae, our new menu item!\",\n \"You can make juice out of coconuts. apples?\"]\n for i, f in enumerate(files):\n text_path = test_sample_dir / Path(f)\n if text_path.exists() is False:\n with open(text_path, \"w\") as fh:\n fh.write(text_body[i])\n log.info(\"%s written\" % (text_path))", "def test_address_breakpoints (self):\n self.build()\n self.address_breakpoints()", "def build_debug_html(self):\n with open(\"debug.html\", \"w+\") as f:\n self.add_prelude_boilerplate(f)\n self.build_state_colors()\n self.add_css_styling(f)\n self.add_cli_text(f)", "def main():\n empty_models = r\"\\{п \\}|\\{п \\}|\\{нп \\}|\\{нп \\}|\\{нп \\}|\\{п \\}|\\{п\\}|\\{нп\\}|\" \\\n r\"\\{п/нп\\}|\\{п/нп \\}|\\{п/нп \\}|\\{п/нп \\}|\\{возвр \\}|\" \\\n r\"\\{возвр \\}|\\{возвр \\}|\\{возвр\\}\"\n empty_verb_aspect = r\"\\[ \\{\"\n logic_operators_parenth = r\"\\|\\| \\)\\)|&& \\)\\)|\\|\\| \\)|&& \\)\"\n logic_two_or = r\"\\|\\| \\|\\||&& &&\"\n abscent_prep_and_case = r\"\\(0\\), \\(0\\)\"\n strange_dot = r\"•\"\n two_dots_empty = r\",\\. \\.\"\n models_together = r\"\\)[ACD]:|откуда[ACD]:\"\n two_braces = r\"\\{(возвр|нп|п|п/нп) \\{(возвр|нп|п|п/нп)\"\n\n file_empty_models = open(\"dictionary_bugs/bugs_empty_models.txt\", \"w\")\n file_empty_verb_aspect = open(\"dictionary_bugs/bugs_empty_verb_aspect.txt\", \"w\")\n file_logic_operators_parenth = open(\"dictionary_bugs/bugs_logic_operators_parenth.txt\", \"w\")\n file_logic_two_or = open(\"dictionary_bugs/bugs_logic_two_or.txt\", \"w\")\n file_abscent_prep_and_case = open(\"dictionary_bugs/bugs_abscent_prepNcase.txt\", \"w\")\n file_strange_dot = open(\"dictionary_bugs/bugs_strange_dot.txt\", \"w\")\n file_two_dots_empty = open(\"dictionary_bugs/bugs_two_dots_empty.txt\", \"w\")\n file_models_together = open(\"dictionary_bugs/bugs_models_together.txt\", \"w\")\n file_two_braces = open(\"dictionary_bugs/bugs_two_braces.txt\", \"w\")\n\n file_empty_models.write(\"# ошибки вида `{п }`, пустые модели управления\\n\")\n file_empty_verb_aspect.write(\"# ошибки вида `[ {`, отсутствует вид глагола\\n\")\n file_logic_operators_parenth.write(\"# ошибки вида `|| ))`\\n\")\n file_logic_two_or.write(\"# ошибки вида `|| ||`\\n\")\n file_abscent_prep_and_case.write(\"# ошибки вида `(0), (0)`, отсутствие предлога и падежа\\n\")\n file_strange_dot.write(\"# ошибки вида `•`, неопознанный символ\\n\")\n file_two_dots_empty.write(\"# ошибки вида `,. 
.`, многоточие\\n\")\n file_models_together.write(\"# ошибки вида `)C:`, слитные модели управления без разделителей (&&, ||)\\n\")\n file_two_braces.write(\"# ошибки вида '{возвр {возвр'\\n\")\n\n\n for line in sys.stdin:\n if not re.search(empty_models, line):\n if not re.search(empty_verb_aspect, line):\n if not re.search(logic_operators_parenth, line):\n if not re.search(logic_two_or, line):\n if not re.search(abscent_prep_and_case, line):\n if not re.search(strange_dot, line):\n if not re.search(two_dots_empty, line):\n if not re.search(models_together, line):\n if not re.search(two_braces, line):\n sys.stdout.write(line)\n\n if re.search(empty_models, line):\n file_empty_models.write(line)\n if re.search(empty_verb_aspect, line):\n file_empty_verb_aspect.write(line)\n if re.search(logic_operators_parenth, line):\n file_logic_operators_parenth.write(line)\n if re.search(logic_two_or, line):\n file_logic_two_or.write(line)\n if re.search(abscent_prep_and_case, line):\n file_abscent_prep_and_case.write(line)\n if re.search(strange_dot, line):\n file_strange_dot.write(line)\n if re.search(two_dots_empty, line):\n file_two_dots_empty.write(line)\n if re.search(models_together, line):\n file_models_together.write(line)\n if re.search(two_braces, line):\n file_two_braces.write(line)", "def test_generate_mapping_file(self):\n all_files = []\n output_fp, sampleID_count = generate_mapping_file(\n self.qiime_mapping_file_fp,\n all_files,\n config,\n total_tasks_created,\n output_dp,\n sampleID_count)", "def createTrainFile(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrain(a_patterns)", "def gen_yaml(self):\n mjd_min = -21.*(1.+self.zval)\n mjd_max = 63.*(1.+self.zval)\n duration = (mjd_max-mjd_min)\n cad = 0.1*(1.+self.zval)\n\n with open(self.fake_orig, 'r') as file:\n filedata = file.read()\n filedata = filedata.replace('duration', str(duration))\n filedata = filedata.replace('mymin', str(mjd_min))\n filedata = filedata.replace('cadvalue', str(cad))\n with open('{}/{}'.format(self.fake_dir, self.fake_name), 'w') as file:\n file.write(filedata)", "def py_linetests(filediff):\n pass", "def create_scanned_files():\n file_paths = [\n os.path.join(STUBS_PATH, \"test3.h\"),\n os.path.join(STUBS_PATH, \"test4.h\"),\n os.path.join(STUBS_PATH, \"test5.h\"),\n os.path.join(STUBS_PATH, \"test6.h\")\n ]\n for file_path in file_paths:\n with open(file_path, \"w\") as new_file:\n if file_path in [os.path.join(STUBS_PATH, \"test3.h\")]:\n new_file.write(HEADER_WITHOUT_SPDX)\n elif file_path in [os.path.join(STUBS_PATH, \"test6.h\")]:\n new_file.write(HEADER_WITH_BINARY_LICENSE)\n else:\n new_file.write(HEADER_WITH_SPDX)\n yield\n for file_path in file_paths:\n os.remove(file_path)", "def testPattern(self):\n with TestLog.StdoutCapture(self.outputFilename):\n self.configure(\"\"\"\nlog4j.rootLogger=DEBUG, CA\nlog4j.appender.CA=ConsoleAppender\nlog4j.appender.CA.layout=PatternLayout\nlog4j.appender.CA.layout.ConversionPattern=%-5p %c %C %M (%F:%L) %l - %m - %X%n\n\"\"\")\n log.trace(\"This is TRACE\")\n log.info(\"This is INFO\")\n log.debug(\"This is DEBUG\")\n\n log.MDC(\"x\", 3)\n log.MDC(\"y\", \"foo\")\n log.MDC(\"z\", TestLog)\n\n log.trace(\"This is TRACE 2\")\n log.info(\"This is INFO 2\")\n log.debug(\"This is DEBUG 2\")\n log.MDCRemove(\"z\")\n\n log.trace(\"This is TRACE 3\")\n log.info(\"This is INFO 3\")\n log.debug(\"This is DEBUG 3\")\n log.MDCRemove(\"x\")\n log.trace(\"This is TRACE 4\")\n log.info(\"This is INFO 4\")\n log.debug(\"This is DEBUG 4\")\n\n 
log.trace(\"This is TRACE 5\")\n log.info(\"This is INFO 5\")\n log.debug(\"This is DEBUG 5\")\n\n log.MDCRemove(\"y\")\n\n # Use format to make line numbers easier to change.\n self.check(\"\"\"\nINFO root testPattern (test_log.py:{0[0]}) test_log.py({0[0]}) - This is INFO - {{}}\nDEBUG root testPattern (test_log.py:{0[1]}) test_log.py({0[1]}) - This is DEBUG - {{}}\nINFO root testPattern (test_log.py:{0[2]}) test_log.py({0[2]}) - This is INFO 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}\nDEBUG root testPattern (test_log.py:{0[3]}) test_log.py({0[3]}) - This is DEBUG 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}\nINFO root testPattern (test_log.py:{0[4]}) test_log.py({0[4]}) - This is INFO 3 - {{{{x,3}}{{y,foo}}}}\nDEBUG root testPattern (test_log.py:{0[5]}) test_log.py({0[5]}) - This is DEBUG 3 - {{{{x,3}}{{y,foo}}}}\nINFO root testPattern (test_log.py:{0[6]}) test_log.py({0[6]}) - This is INFO 4 - {{{{y,foo}}}}\nDEBUG root testPattern (test_log.py:{0[7]}) test_log.py({0[7]}) - This is DEBUG 4 - {{{{y,foo}}}}\nINFO root testPattern (test_log.py:{0[8]}) test_log.py({0[8]}) - This is INFO 5 - {{{{y,foo}}}}\nDEBUG root testPattern (test_log.py:{0[9]}) test_log.py({0[9]}) - This is DEBUG 5 - {{{{y,foo}}}}\n\"\"\".format([x + 130 for x in (0, 1, 8, 9, 13, 14, 17, 18, 21, 22)], __name__)) # noqa E501 line too long", "def test_write_debug_data(simulation_factory, lattice_snapshot_factory,\n tmp_path):\n sim = simulation_factory(lattice_snapshot_factory())\n\n mc = hoomd.hpmc.integrate.ConvexPolyhedron()\n mc.shape['A'] = dict(vertices=[\n (-0.5, 0, 0),\n (0.5, 0, 0),\n (0, -0.5, 0),\n (0, 0.5, 0),\n (0, 0, -0.5),\n (0, 0, 0.5),\n ])\n\n sim.operations.integrator = mc\n\n sim.write_debug_data(tmp_path / 'test_unscheduled.json')\n\n sim.run(10)\n\n sim.write_debug_data(tmp_path / 'test_scheduled.json')", "def _DebugParseFileEntry(self):\n pdb.post_mortem()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function for class Tracer. pid: process ID (for pydbg.attach()). functions: an array holding the modified/corrected function list.
def run(self, pid, functions): raw_input("[*] When you're ready, press [ENTER] to continue...") dbg = pydbg() dbg.attach(pid) try: functions = self.__correctIDAProRETNs(dbg, functions) #Correct RETN addresses - IDA specific problem except: print "[*] Error: Either you don't have the right function list, or the component is not loaded at the moment" sys.exit(-1) print "[*] Enumerating functions...", counter = 0 hooks = utils.hook_container() for addr in functions: counter += 1 hooks.add(dbg, addr[0], 10, self.log, None) #Only look at the first 10 arguments print " %s hooks added" %counter print "[*] Press [CTRL]+[C] to stop..." dbg.run() print "[*] And we're done with tracing"
[ "def updatePidList(self):", "def frompointer(*args) -> \"tid_array *\":\n return _ida_pro.tid_array_frompointer(*args)", "def advapi32_ProcessTrace(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"HandleArray\", \"HandleCount\", \"StartTime\", \"EndTime\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def __correctIDAProRETNs(self, dbg, functions):", "def tid_array_frompointer(*args) -> \"tid_array *\":\n return _ida_pro.tid_array_frompointer(*args)", "def pid_thread_list(self, pid):\n self.writeCommand('pid_thread_list', pid)\n return self", "def call_tracing(func,args):\n\tpass", "def _code_vars(self, lines, spacer, testid):\n #This method only gets called if we are the main executable being tested.\n if type(self.executable).__name__ == \"Function\":\n if (self.executable.name + \"_fpy\").lower() not in self.variables:\n lines.append(\"{}{}\".format(spacer, self.executable.definition(\"_fpy\")))\n\n if self.timed(testid):\n lines.append(\"{}real(fdp) :: fpy_start, fpy_end, fpy_elapsed = 0\".format(spacer))", "def _code_call(self, lines, spacer, testid):\n #If we are timing the executable, we want to get the time before and after the\n #execution of just this method and then add it to the elapsed time.\n lines.append(\"\")\n if self.timed(testid):\n lines.append(\"{}call cpu_time(fpy_start)\".format(spacer))\n\n #This is either a call to a subroutine or a function. We need to make\n #sure that we handle any mapping tags or call tags in the docstrings\n if type(self.executable).__name__ == \"Subroutine\":\n prefix = \"call \" \n else:\n #For a function, we still need to save the value somewhere so we\n #can compare it.\n pntr = \">\" if \"pointer\" in [m.lower() for m in self.executable.modifiers] else \"\"\n prefix = \"{}_fpy ={} \".format(self.executable.name, pntr)\n\n spacing = len(list(prefix)) + len(list(self.executable.name)) + len(spacer)\n\n #Unfortunately, the fpy_auxiliary module freaks out if we call a public\n #method directly which is *also* a module procedure for the type (since\n #the .mod files vary because of the extra public subroutine on the type,\n #the compiler dies with Abort trap 6 error. 
So, we need to call embedded\n #methods using the %-syntax.\n if not self.executable.is_type_target:\n callname = self.executable.name\n xtype = None\n else:\n xtype = self.executable.is_type_target\n for emname, emexec in xtype.executables.items():\n if emexec.target is self.executable:\n callname = emname\n break\n else:\n callname = self.executable.name\n \n if \"paramlist\" in self.attributes:\n #The developer has decided on an alternate call signature from\n #the one that gets auto-generated.\n specified = re.split(\",\\s*\", self.attributes[\"paramlist\"])\n if xtype is not None and len(specified) == len(self.executable.ordered_parameters):\n msg.warn(\"Explicit parameter list for embedded executable should *exclude* the \"\n \"reference to 'self' in the first argument.\")\n cleaned = present_params(specified, spacing, 90)\n lines.append(\"{}{}{}({})\".format(spacer, prefix, callname, cleaned))\n else:\n #We can construct the actual list of parameters to use in the call\n #and substitute mappings where appropriate.\n calllist = []\n for ip, param in enumerate(self.executable.ordered_parameters):\n #Because of ignorable parameters, if the parameter is optional, explicitly\n #specify its name.\n if \"optional\" in param.modifiers:\n optstr = \"{}=\".format(param.name)\n else:\n optstr = \"\"\n \n if self.group is not None and param in self.group.mappings:\n #The first parameter name will also be the name of the variable that gets\n #created and called.\n if xtype is not None and ip == 0:\n callname = \"{}%{}\".format(self.group.mappings[param], callname)\n else:\n calllist.append(optstr + self.group.mappings[param])\n else:\n var = None\n pname = param.name.lower()\n #The test specification takes precedence over the testing group for variables.\n if self.test is not None and pname in self.test.variables:\n var = self.test.variables[pname]\n if var is None and self.group is not None and pname in self.group.variables:\n var = self.group.variables[pname]\n \n if xtype is not None and ip == 0:\n callname = \"{}%{}\".format(param.name, callname)\n else:\n if var is not None and not var.ignore:\n calllist.append(optstr + param.name)\n elif var is None:\n calllist.append(optstr + param.name)\n\n lines.append(\"{}{}{}({})\".format(spacer, prefix, callname,\n present_params(calllist, spacing, 90))) \n\n if self.timed(testid):\n lines.append(\"{}call cpu_time(fpy_end)\".format(spacer))\n lines.append(\"{}fpy_elapsed = fpy_elapsed + fpy_end - fpy_start\".format(spacer))", "def get_dnsperf_pid_list(self): \n pid_list = [] \n call(\"ps -C dnsperf | grep dnsperf | tr -c '0123456789 \\n' '?' | cut -d '?' 
-f1 | tr -d ' ' > \" + \n self.paths['TEMP_FOLDER_PATH'] + \"dnsperf-pid.dat\", shell=True) \n f = open(self.paths['TEMP_FOLDER_PATH'] + 'dnsperf-pid.dat').readlines()\n if f:\n for line in f:\n pid_list.append(line.rstrip())\n else:\n self.write_to_log('>> ERROR: the process dnsperf is not alive.\\n')\n self.s.sendto ('ERROR: the process dnsperf is not alive.', self.addr) \n return []\n \n return pid_list", "def matching_pids(match_func) -> Iterable[int]:\n for proc in psutil.process_iter(): # python api for ps -aux\n with proc.oneshot():\n try:\n cmd = proc.cmdline()\n except Exception:\n continue\n\n # ['python', './manage.py', 'table_heartbeat', '6a7d6689']\n if len(cmd) >= 3 and match_func(proc, cmd):\n yield proc.pid", "def analyze_logons(self,task_data):\n \n gid = task_data['gid']\n result = {}\n result['gid'] = gid\n \n LOGON_NTLM = False\n \n for l in task_data['log']: \n \n # match the very first meaningful log line\n # e.g.\n # dcagent packet: removed from queue, called:1428633 remain:0\n m = r_func_dcadgent_remove_q.match(l)\n if m:\n c = m.group('called')\n \n # add 'called' into analyzer stack \n if ('called' in m.groupdict().keys()):\n task_data['an']['called'] = m.groupdict()['called']\n logger_analyzer.debug(\"analyze_logons: r_func_dcadgent_remove_q: [called]: %s\" % (task_data['an']['called'],))\n\n \n if c not in self.chain['called'].keys():\n logger_analyzer.debug(\"analyze_logons: called ID '%s' not found! Skipping.\" % (c,))\n # FIXME: this could be handled more elegant way: the logon event which cannot be paired \n # will be marked as <incomplete>\n continue\n \n if 'called' not in result: result = Analyzer.stack_dict( result, {'called':c})\n continue\n\n \n m = r_func_ntlm_remove_q.match(l)\n if m:\n LOGON_NTLM = True\n \n c = m.group('called')\n \n # add 'called' into analyzer stack \n if ('called' in m.groupdict().keys()):\n task_data['an']['called'] = m.groupdict()['called']\n logger_analyzer.debug(\"analyze_logons: : r_func_ntlm_remove_q: [called]: %s\" % (task_data['an']['called'],))\n \n \n if c not in self.chain['called'].keys():\n logger_analyzer.debug(\"analyze_logons: called ID '%s' not found! 
Skipping.\" % (c,))\n # FIXME: this could be handled more elegant way: the logon event which cannot be paired \n # will be marked as <incomplete>\n continue\n \n if 'called' not in result: result = Analyzer.stack_dict( result, {'called':c})\n continue \n \n # match logon event with extra IP -- MATCH BEFORE without extra\n # logon event(1428633): len:49 dc_ip:10.81.0.41 time:1359606186 len:32 \n # data:NB0036.lpti.le.grp/PC_D01/TPT090 ip:10.81.12.110:10.81.3.163\n m = r_func_logon_event_ex.match(l)\n if m:\n logger_analyzer.debug(\"analyze_logons: func_logon_event_1 [extra ip]: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n #pprint(m.groupdict())\n continue\n \n # match logon event with single IP, eg\n # e.g.\n # logon event(1428635): len:43 dc_ip:10.81.0.41 time:1359606186 len:31 \n # data:T1288.lpti.le.grp/PC_D01/TPT009 ip:10.81.10.67\n m = r_func_logon_event.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_logon_event: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue\n \n # \n m = r_func_new_logon_0.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_new_logon_0: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n \n m = r_func_new_logon_1.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_new_logon_1: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n \n m = r_func_dns_query.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_dns_query: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n \n m = r_func_dns_cannot_resolve.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_dns_cannot_resolve: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n \n\n # ANALYZE NTLM LOGON EVENT\n if LOGON_NTLM:\n m = r_func_ntlm_user.match(l) \n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_ntlm_user: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n\n m = r_func_ntlm_wksta.match(l) \n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_ntlm_wksta: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n\n m = r_func_ntlm_domain.match(l) \n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_ntlm_domain: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n m = r_func_ntlm_seq.match(l)\n if m:\n # update the result dict by the match\n logger_analyzer.debug(\"analyze_logons: func_ntlm_seq: %s\" % (l,))\n #result.update(m.groupdict())\n result = Analyzer.stack_dict(result,m.groupdict())\n continue \n \n \n \n #pprint(result)\n return result", "def findChildProcessnames(pid):\n\n command = \"/bin/ps -e --no-headers -o pid -o ppid -o fname\"\n\n output = executeCommand(command)\n #print \"ps output: %s\" % output\n\n \n pieces = []\n procnames = {}\n for line in output.split(\"\\n\"):\n pieces= 
line.split()\n try: \n value=int(pieces[1])\n except Exception,e:\n #print \"trouble interpreting ps output %s: \\n %s\" % (e,pieces)\n continue\n if value==pid:\n try:\n job=int(pieces[0])\n except ValueError,e:\n #print \"trouble interpreting ps output %s: \\n %s\" % (e,pieces[0])\n continue\n# result.append(job)\n procnames[job]=pieces[2]\n \n# for item in output.split():\n# try:\n# value = int(item)\n# except ValueError,e:\n# print \"trouble interpreting ps output %s: \\n %s \\n\" % (e,item,output)\n# continue\n# result.append(value)\n return procnames", "def _set_array_pid(self):\n for array in self.arrays:\n array.set_pid(self.rank)", "def kill_processes(self, *Proc):\n stdout = _return_pslist()\n pat=re.compile(r'\\w+')\n psDict = {}\n PIDlist = []\n not_found_list = []\n # build mapping of process name to the list of associated process ids.\n # the pslist process name is currently limited to display first 15 chars, so we will compare only 15 chars\n for ps in stdout.splitlines():\n psInfo = ps.split()\n if len(psInfo) < 2:\n continue\n # get just the process name, remove everything after first non-alphanumeric\n psProcessName = re.match(pat,psInfo[1]).group()\n if psProcessName != None:\n psCompareProcessName = psProcessName.lower()[:15]\n if psCompareProcessName in psDict:\n psDict[psCompareProcessName].append(psInfo[0])\n else:\n psDict[psCompareProcessName] = [psInfo[0]]\n for process in list(Proc):\n compareProcessName = process.lower()[:15]\n if compareProcessName in psDict:\n PIDlist.extend(psDict[compareProcessName])\n else:\n not_found_list.append(process)\n if len(PIDlist):\n _kill_process(PIDlist)\n if len(not_found_list):\n return [12,not_found_list]\n else:\n return [0, []]", "def get_pids(self, class_id):\n\n count = ctypes.c_uint(0)\n count_ref = ctypes.byref(count)\n\n restype = ctypes.POINTER(ctypes.c_uint)\n self.pqos.lib.pqos_pid_get_pid_assoc.restype = restype\n p_pids = self.pqos.lib.pqos_pid_get_pid_assoc(class_id, count_ref)\n\n if p_pids:\n pids = [p_pids[i] for i in range(count.value)]\n free_memory(p_pids)\n else:\n pids = []\n\n return pids", "def plotTrace(self, ids = None, depth = 0, stage = all, valid = False, color = all):\n\n data = self.data(label = ['x', 'y'], stage = stage, valid = valid);\n \n uids = np.unique(ids);\n \n if color is all:\n c = cm.rainbow;\n n = len(uids);\n color = [np.array(c(c.N * i/(n-1)))[:-1] for i in range(n)];\n\n #lines\n plt.plot(data[:,0], data[:,1], color = 'black'); \n \n legs = [];\n for k,i in enumerate(uids):\n ii = np.where(ids == i)[0];\n if depth > 0:\n ii = [ii-d for d in range(depth)];\n ii = np.unique(np.concatenate(ii));\n \n plt.plot(data[ii, 0], data[ii, 1], '.', color = color[k]);\n\n legs.append(mpatches.Patch(color=color[k], label= str(i)));\n \n plt.legend(handles=legs);", "def __getitem__(self, *args) -> \"tid_t\":\n return _ida_pro.tid_array___getitem__(self, *args)", "def collect_id_as_array(event):\n return [event.get('trace_id')]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The interface for selecting the process to monitor
def selectProcessID():
    processes = []
    dbg = pydbg()
    #Gather an array of active processes
    for (pid, pname) in dbg.enumerate_processes():
        processes.append([pid, pname])
    print "\n=== Please pick a process to monitor ===\n"
    print "Choice Process Name"
    counter = 0
    #Prepare a choice list for the user
    for item in processes:
        print "[%3d] %s" %(counter, item[1])
        counter += 1
    while 1:
        try:
            index = int(raw_input("\nChoice [n]:"))
            if index < 0 or index >= counter: raise
            break
        except KeyboardInterrupt:
            sys.exit(-1)
        except:
            print "That is not a choice."
    dbg = None
    return processes[index][0] #Return the process ID of the user's choosing
[ "def launch(self):\n self.processdev.start()\n pid = self.processdev.pid\n p = psutil.Process(self.processdev.pid)\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print(str(pid) + \"est le pid\")", "def monitor(self, listener=None):\r\n\r\n if not self._process_watcher:\r\n self._process_watcher = ProcessWatcher(self.loop, self)\r\n\r\n self._process_watcher.subscribe(listener)", "def watch(self):\n\n while not self.pid:\n for (pid, name) in self.dbg.enumerate_processes():\n # ignore the optionally specified PID.\n if pid == self.ignore_pid:\n continue\n\n if name.lower() == self.proc_name.lower():\n self.pid = pid\n break\n\n self.process_monitor.log(\"debugger thread-%s found match on pid %d\" % (self.getName(), self.pid))", "def process_get(self, pid):\n if not isinstance(pid, baseinteger):\n raise TypeError(\"pid can only be an instance of type baseinteger\")\n guest_process = self._call(\"processGet\",\n in_p=[pid])\n guest_process = IGuestProcess(guest_process)\n return guest_process", "def watch(ctx, args):\n\n if not args.pid:\n raise NotImplementedError(\n \"We currently have no idea how to determine which \"\n \"is the current running operation. Please provide a PID while \"\n \"we figure things out!\")\n\n run = _run_for_pid_arg(args.pid)\n _watch_run(run)", "def service_monitor_selector(self) -> Optional[pulumi.Input['PrometheusSpecServiceMonitorSelectorArgs']]:\n return pulumi.get(self, \"service_monitor_selector\")", "def _select_monitor(self, context, server_list):\n # Clean old configuration files and data.\n # Find one monitor to do this job.\n # Just monitor node can do this job.\n monitor_list = [x for x in server_list if x['is_monitor']]\n\n if len(monitor_list) == 0:\n LOG.error('Can not find monitor_list')\n try:\n raise MonitorAddFailed\n except Exception, e:\n LOG.error(\"%s: %s\" %(e.code, e.message))\n pool_default_size = db.vsm_settings_get_by_name(context,'osd_pool_default_size')\n pool_default_size = int(pool_default_size.value)\n if len(monitor_list) < pool_default_size:\n LOG.error('There must be at least %s monitors.'%pool_default_size)\n self._update_server_list_status(context,\n server_list,\n \"Error: monitors < %s\"%pool_default_size)\n try:\n raise MonitorAddFailed\n except Exception, e:\n LOG.error(\"%s: %s\" %(e.code, e.message))\n raise\n\n LOG.info(' monitor_list = %s' % monitor_list)\n if len(monitor_list) == 1:\n idx = 0\n else:\n idx = random.randint(0, len(monitor_list)-1)\n LOG.info(' select monitor = %d' % idx)\n job_server = monitor_list[idx]\n return job_server", "def select_processor():\n\n return globals()[f'{args.task}Processor']()", "def system_monitor():\n r1 = env.run(SUDO_INSTALL + \"gir1.2-gtop-2.0 gir1.2-networkmanager-1.0\")\n return r1", "def pid(self):\n process_id = None\n output = Shell.ps('-ax')\n for line in output.split(\"\\n\"):\n\n if 'mongod' in line and \"--port\" in line:\n process_id = line.split(\" \")[0]\n return process_id\n\n return process_id", "def _as_process(self):\n pid = self.pid\n if not pid:\n raise self.NotStarted()\n return psutil.Process(pid)", "def OnGetInfo(self, event): # wxGlade: ProcessManagerPanel.<event_handler>\n item = self.process_list_ctrl.GetFocusedItem()\n pid = int(self.process_list_ctrl.GetItemText(item))\n type, info = self.procManager.getProcessInfo(pid)\n if type==\"AnalyticDisp\" or type==\"NumericDisp\" or type==\"AnalyticCrossSec\":\n panel = showEditorWindow(self, \"Files being used by process: \" + str(pid), allowEditting = False)\n panel.loadInteractions(info[0])\n 
panel.loadSpins(info[1])\n if type==\"Fit\":\n showParamListFrame(info, str(pid) + \" Fit Snapshot\")\n else:\n print \"Info for this type of process not implemented!\"\n event.Skip()", "def me(self):\r\n myPid = os.getpid()\r\n myProcess = psutil.Process(myPid)\r\n return self.parseProcess(myProcess)", "def _cfg_monitor(self):\n self._cfg(\"monitor\")", "def start_cli_at_background_and_return_linux_pid(command): \n out = connections.execute_cli(command)\n result_list = out.split()\n return result_list[1].strip()", "def fork_and_monitor( self, args ):\n while True :\n self.pa.logdebug( \"Forking monitor ...\" )\n pid = os.fork()\n if pid == 0 : # child process\n cmdargs = sys.argv[:]\n cmdargs.remove( '-m' )\n cmdargs.append( os.environ )\n h.reseed_random()\n os.execlpe( sys.argv[0], *cmdargs )\n\n else : # parent \n try :\n pid, status = os.wait()\n if status & 0xFF00 != 0x300 :\n sys.exit( status )\n except KeyboardInterrupt :\n sys.exit(0)", "def subscribe_process_status(self):\n self.subscribe(EventHeaders.PROCESS)", "def cur_process(self):\n \n assert self._theproc is None or \\\n self._theproc.state == _Process.STATE_RUNNING\n return self._theproc", "def launch ():\n get_network_info()\n core.registerNew(job_aware_switch)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read portfolio from JSON file.
def read_json_portfolio(portfolio_file: str) -> dict:
    with open(portfolio_file, "r") as p_file:
        return json.load(p_file)
[ "def load_json(path):\n with open(path, 'r') as f:\n new_projects = json.load(f)\n\n return new_projects", "def _read_json(self,fname):\n\n with open(fname) as f:\n data = json.load(f)\n\n return data", "def read_impact_data_json(self):\n try:\n json_file = self.impact_layer.upload_session.layerfile_set.get(\n file__endswith=\".json\")\n impact_data = json.loads(json_file.file.read())\n return impact_data\n except LayerFile.DoesNotExist:\n return {}", "def __read(path):\n with open(path, \"r\") as f:\n data = json.load(f)\n logger = __logger()\n logger.info(\"Read the whole json, from path: {}.\".format(path))\n return data", "def loadPortfolio(self):\n\n # load the db from file\n db = TinyDB(self.portfolioDBFile)\n\n\n # for each asset in the db\n for asset in db:\n\n\n if asset['assetType'] == 'COMMON':\n\n # create the asset\n newAsset = st.CommonStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n elif asset['assetType'] == 'PREFFERED':\n\n # create the asset\n newAsset = st.PreferredStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n\n # append the nes asset to the list of assets in the portfolio\n self.assets.append(newAsset)", "def jsonread(filename): \n res = None", "def _load_json():\n\n with open(\"data/json/0a234fea9682454facab730c0a7f83f0.json\") as json_file:\n pipedesign_json = json.load(json_file)\n\n return pipedesign_json", "def readSat(sat, pathToJSONDataFiles):\r\n\r\n\t# data file path\r\n\tfileName = sat + \"-satellite.json\"\r\n\tfilePath = pathToJSONDataFiles + \"/\" + fileName\r\n\r\n\t# open the file\r\n\ttry:\r\n\t\tfileHandle = open(filePath)\r\n\texcept IOError:\r\n\t\tprint(\"ERROR: Unable to open the file \" + filePath)\r\n\t\traise IOError\r\n\r\n\t# read the file\r\n\tdata = json.load(fileHandle)\r\n\r\n\treturn data", "def read_json_data(storage_file: str):\n with open(storage_file, 'r') as f:\n data = json.load(f)\n return data", "def loadAndCleanPortfolio():\n\n portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)\n \n # Available channels\n channels = [\"email\", \"mobile\", \"social\", \"web\"]\n\n # Create a one hot encoding for channel type\n portfolioWithChannelsEncoded = portfolio.copy()\n for channel in channels:\n portfolioWithChannelsEncoded[channel] = portfolio[\"channels\"].apply(lambda cs: 1*(channel in cs))\n\n portfolio_df = portfolioWithChannelsEncoded.drop(columns=\"channels\")\n\n # Give a descriptive name\n portfolio_df[\"code\"] = portfolio_df[\"offer_type\"].str[0] + \".\" + \\\n portfolio_df[\"difficulty\"].astype(str) + \".\" + \\\n portfolio_df[\"reward\"].astype(str) + \".\" + \\\n portfolio_df[\"duration\"].astype(str)\n\n # Rename and order columns and rows\n portfolio_df = portfolio_df.rename(columns={\"id\": \"offer_id\", \"offer_type\": \"type\"})\n cols = [\"offer_id\",\"code\",\"type\",\"difficulty\",\"reward\",\"duration\",\"email\",\"mobile\",\"social\",\"web\"]\n portfolio_df = portfolio_df[cols]\n portfolio_df = portfolio_df.sort_values([\"type\", \"difficulty\", \"reward\", \"duration\"])\n\n return portfolio_df", "def read_in_json(path:str):\n with open(path) as jf:\n jf=json.load(jf)\n return jf", "def read_data():\n with open(\"stagnationPointNu.json\", \"r\") 
as ifile:\n data = json.load(ifile)\n return data", "def read(self):\n with open(self.filename) as json_file:\n data = json.load(json_file)\n if 'nodes' in data:\n nodes = data['nodes']\n for n in nodes:\n if 'host' in n:\n node = self.add_node(n['host'])\n if 'goal' in n:\n node.set_goal(n['goal'])\n if 'authentication' in data:\n auth = data['authentication']\n if 'username' in auth:\n self.usernm = auth['username']\n if 'password' in auth:\n self.passwd = auth['password']", "def load_rentals_file(filename):\n logging.debug(\"Loading input file %s...\", filename)\n\n try:\n with open(filename) as file:\n try:\n data = json.load(file)\n except ValueError:\n logging.error(\"Could not locate input file (value error)\")\n sys.exit()\n except FileNotFoundError:\n logging.error(\"Could not locate input file (file did not exist)\")\n sys.exit()\n\n return data", "def readCatalogue(self):\n\t\twith open(config['Fixed']['CatalogFile'], mode = 'r') as cat_file:\n\t\t\tcatalog = json.load(cat_file)\n\n\t\treturn catalog", "def load(self):\n if os.path.exists(PROJECTS):\n with open(PROJECTS, \"r\") as f:\n j = json.load(f)\n self.projects = [\n Project.create_from_dict(d)\n for d in j[\"projects\"]\n ]", "def read_level_data(filename):\n with open(filename, 'r') as f:\n return json.loads(f.read())", "def get_repos():\n try:\n with open(\"repos.json\") as data_file: \n repos = json.load(data_file)\n return repos\n except:\n print \"Error loading repos.json\"\n sys.exit()", "def read_data(filename):\n with open(filename) as fin:\n movies = [json.loads(l) for l in fin]\n\n return movies" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store a rebalanced portfolio.
def store_rebalanced_portfolio(portfolio: dict, path: str):
    # Sort stocks by WKN
    portfolio["Stocks"] = sorted(portfolio["Stocks"], key=lambda x: x["WKN"])
    with open(path, "w") as file_:
        json.dump(portfolio, file_, indent=4)
[ "def backtest_portfolio(self):\n\n # Construct the portfolio DataFrame to use the same index\n # as 'positions' and with a set of 'trading orders' in the\n # 'pos_diff' object, assuming market open prices.\n portfolio = self.positions*self.bars['Open']\n pos_diff = self.positions.diff()\n\n # Create the 'holdings' and 'trades' by running through the trades\n # and adding/substracting the relevant quantity from each column\n\n portfolio['holdings'] = (self.positions*self.bars['Open']).sum(axis=1)\n portfolio['cash'] = self.initial_capital - (pos_diff*self.bars['Open']).sum(axis=1).cumsum()\n \n # Finalise the total and bar-based returns based on the 'cash'\n # and holdings figures for the portfolio\n portfolio['total'] = portfolio['cash'] + portfolio['holdings']\n portfolio['returns'] = portfolio['total'].pct_change()\n return portfolio", "def save_portfolio():\n name = request.form.get(\"portfolio_name\")\n print name\n print session[\"portfolio\"]\n print session[\"user_id\"]\n for item in session[\"portfolio\"]:\n pid = '\"'+item+'\"'\n p = model.Portfolio()\n p.donor_id = session[\"user_id\"]\n p.project_id = pid\n p.portfolio_title = name\n modelsession.add(p)\n modelsession.commit()\n session[\"portfolio\"] = []\n flash(\"Portfolio successfully saved\")\n return redirect(\"/confirm_portfolio\")", "def update_portfolio_data(paper_name, price, tick_date):\n portfolio_by_month[paper_name].append(price)\n portfolio_by_month_dates.add(tick_date)", "def backtest_portfolio(self):\r\n\r\n # Set the portfolio object to have the same time period\r\n # as the positions DataFrame\r\n portfolio = pd.DataFrame(index=self.positions.index)\r\n pos_diff = self.positions.diff()\r\n\r\n # Work out the intraday profit of the difference\r\n # in open and closing prices and then determine\r\n # the daily profit by longing if an up day is predicted\r\n # and shorting if a down day is predicted\r\n portfolio['price_diff'] = self.bars['Close'] - self.bars['Open']\r\n portfolio['price_diff'][0:5] = 0.0\r\n portfolio['profit'] = self.positions[self.symbol] * portfolio['price_diff']\r\n\r\n # Generate the equity curve and percentage returns\r\n portfolio['total'] = self.initial_capital + portfolio['profit'].cumsum()\r\n portfolio['returns'] = portfolio['total'].pct_change()\r\n return portfolio", "def rebalance_portfolio(self):\n orders = []\n for asset in self.portfolio:\n # Get all of our variables from portfolio\n symbol = asset.get(\"symbol\")\n weight = asset.get(\"weight\")\n last_price = asset.get(\"last_price\")\n\n # Get how many shares we already own\n # (including orders that haven't been executed yet)\n quantity = self.get_asset_potential_total(symbol)\n if quantity:\n logging.info(\n \"Asset %s shares value: %.2f$. %.2f$ per %d shares.\"\n % (symbol, quantity * last_price, last_price, quantity)\n )\n\n # Calculate how many shares we need to buy or sell\n shares_value = self.portfolio_value * weight\n new_quantity = shares_value // last_price\n quantity_difference = new_quantity - quantity\n logging.info(\n \"Weighted %s shares value with %.2f%% weight: %.2f$. 
%.2f$ per %d shares.\"\n % (symbol, weight * 100, shares_value, last_price, new_quantity)\n )\n\n # If quantity is positive then buy, if it's negative then sell\n side = \"\"\n if quantity_difference > 0:\n side = \"buy\"\n elif quantity_difference < 0:\n side = \"sell\"\n\n # Execute the order if necessary\n if side:\n order = self.create_order(symbol, abs(quantity_difference), side)\n orders.append(order)\n asset[\"quantity\"] = new_quantity\n\n self.submit_orders(orders)", "def save_portfolio_event(self, portfolio_id: str, filepath: Path) -> None:\n logging.info(\n \"TradingMate - save portfolio {} to {}\".format(portfolio_id, filepath)\n )\n for pf in self._portfolios:\n if pf.get_id() == portfolio_id:\n pf.save_portfolio(filepath)", "def loadPortfolio(self):\n\n # load the db from file\n db = TinyDB(self.portfolioDBFile)\n\n\n # for each asset in the db\n for asset in db:\n\n\n if asset['assetType'] == 'COMMON':\n\n # create the asset\n newAsset = st.CommonStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n elif asset['assetType'] == 'PREFFERED':\n\n # create the asset\n newAsset = st.PreferredStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n\n # append the nes asset to the list of assets in the portfolio\n self.assets.append(newAsset)", "def set_fund_portfolio(self,\n fund_id: str,\n portfolio: FundPortfolio) -> bool:\n try:\n self.client.hset('fund_portfolio', fund_id, str(portfolio))\n return True\n except Exception as e:\n print(f'Error setting fund portfolio for \"{fund_id}\" from Redis: {traceback.format_exc()}')\n return False", "def create_portfolio(self, name):\n url = f'{self.API_URL}/portfolios/'\n data = {\n 'name': name,\n 'portfolio_currency': 'USD'\n }\n headers = {\n 'accept': 'application/json',\n 'Authorization': self._token,\n }\n\n try:\n response = requests.request(\"POST\", url, headers=headers, data=data)\n if response.status_code == 201:\n data = response.json()\n portfolio_id = data['id']\n else:\n return None, [f'Expected 201 response from BETTER but got {response.status_code}: {response.content}']\n except Exception as e:\n return None, [f'Unexpected error creating BETTER portfolio: {e}']\n\n return portfolio_id, []", "def backtest_portfolio(self):\n raise NotImplementedError(\"backtest_portfolio() method needs to be\" \\\n \"implemented!\")", "def add_item(self, portfolio, params={}, **options):\n path = \"/portfolios/%s/addItem\" % (portfolio)\n return self.client.post(path, params, **options)", "def addStock(self, stock_id, quantity , unit_price, commission_price, date, trans_type):\n self.conn.execute(\n \"\"\"INSERT INTO portfolio (stock_id, quantity , unit_price, commission_price, date, trans_type) values (?,?,?,?,?,?) 
\"\"\",\n (stock_id, int(quantity) , unit_price, commission_price, date, trans_type))\n\n if trans_type.lower() == \"sell\":\n self.addUninvested(float((int(quantity) * float(unit_price)) + float(commission_price)))\n elif trans_type.lower() == \"buy\":\n self.subtractUninvested(float((int(quantity) * float(unit_price)) + float(commission_price)))\n self.conn.commit()", "def index():\n\n # Identify current user by id\n current_user = db.execute(\"SELECT cash FROM users where id = :curr_user\", curr_user=session[\"user_id\"])\n\n # Look up stock info\n stock_list = db.execute(\"SELECT symbol, share_price, SUM(share_qty) as share_total FROM transactions WHERE user_id = :curr_user GROUP BY symbol HAVING share_total > 0\",\n curr_user=session[\"user_id\"])\n\n # Create portfolio\n portfolio = {}\n\n for stock in stock_list:\n portfolio[stock[\"symbol\"]] = lookup(stock[\"symbol\"])\n\n # current balance\n balance = current_user[0][\"cash\"]\n\n\n return render_template(\"portfolio.html\", portfolio=portfolio, stock_list=stock_list, balance=balance)", "def update(self, portfolio, params={}, **options):\n path = \"/portfolios/%s\" % (portfolio)\n return self.client.put(path, params, **options)", "def _portfolio_data(self, nodes, date):\n errors = []\n self.operating_currency = self.ledger.options[\"operating_currency\"][0]\n\n types = [\n (\"portfolio_total\", str(Decimal)),\n (\"asset_classes\", str(dict)),\n (\"portfolio_allocation\", str(DecimalPercent)),\n (\"asset_class_total\", str(Decimal)),\n (\"asset_subclasses\", str(dict)),\n (\"asset_class_allocation\", str(DecimalPercent)),\n (\"asset_subclass_total\", str(Decimal)),\n (\"accounts\", str(AccountsDict)),\n (\"asset_subclass_allocation\", str(DecimalPercent)),\n (\"balance_market_value\", str(Decimal)),\n (\"income_gain_loss\", str(DecimalIncomeGainLoss)),\n (\"gain_loss_percentage\", str(DecimalPercentGainLoss)),\n (\"latest_price_date\", str(datetime.date)),\n ]\n\n portfolio_tree = {}\n portfolio_tree[\"portfolio_total\"] = ZERO\n portfolio_tree[\"asset_classes\"] = {}\n for node in nodes:\n account_name = node.name\n commodity = node_commodity(node)\n if (commodity in self.commodity_dict) and (\n \"asset-class\" in self.commodity_dict[commodity].meta\n ):\n asset_class = self.commodity_dict[commodity].meta[\"asset-class\"]\n else:\n asset_class = \"noclass\"\n\n if (commodity in self.commodity_dict) and (\n \"asset-subclass\" in self.commodity_dict[commodity].meta\n ):\n asset_subclass = self.commodity_dict[commodity].meta[\"asset-subclass\"]\n else:\n asset_subclass = \"nosubclass\"\n\n if asset_class not in portfolio_tree[\"asset_classes\"]:\n portfolio_tree[\"asset_classes\"][asset_class] = {}\n portfolio_tree[\"asset_classes\"][asset_class][\n \"portfolio_allocation\"\n ] = ZERO\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_class_total\"] = ZERO\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"] = {}\n if (\n asset_subclass\n not in portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"]\n ):\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ] = {}\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"asset_subclass_total\"] = ZERO\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"portfolio_allocation\"] = ZERO\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"asset_subclass_asset_class_allocation\"] = ZERO\n 
portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"accounts\"] = {}\n\n # Insert account-level balances and\n # Sum totals for later calculating allocation\n account_data = {}\n # Get balance market value at today's date, if possible.\n\n # Calculate cost\n account_cost_conv = self._convert_cost(node, date)\n account_cost_node = {account_cost_conv.currency: account_cost_conv.number}\n\n if self.operating_currency in account_cost_node:\n\n account_cost = account_cost_node[self.operating_currency]\n latest_price = self._account_latest_price(node)\n if latest_price is None or latest_price[0] is None:\n latest_price_date = None\n account_balance_market_value = account_cost\n # assume there's no gain loss\n account_data[\"balance_market_value\"] = account_cost\n account_data[\"income_gain_loss\"] = None\n account_data[\"gain_loss_percentage\"] = None\n account_data[\"latest_price_date\"] = None\n else:\n latest_price_date = latest_price[0]\n (\n account_balance_market_value,\n account_income_gain_loss_unrealized,\n account_gain_loss_unrealized_percentage,\n ) = self._asset_info(node, date)\n\n account_data[\"balance_market_value\"] = account_balance_market_value\n account_data[\n \"income_gain_loss\"\n ] = account_income_gain_loss_unrealized\n account_data[\n \"gain_loss_percentage\"\n ] = account_gain_loss_unrealized_percentage\n account_data[\"latest_price_date\"] = latest_price_date\n\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"accounts\"][account_name] = account_data\n\n # Accumulate sums\n portfolio_tree[\"portfolio_total\"] += account_balance_market_value\n portfolio_tree[\"asset_classes\"][asset_class][\n \"asset_class_total\"\n ] += account_balance_market_value\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"asset_subclass_total\"] += account_balance_market_value\n\n elif len(account_cost_node) == 0:\n # Assume account is empty\n account_data[\"balance_market_value\"] = ZERO\n account_data[\"income_gain_loss\"] = ZERO\n account_data[\"gain_loss_percentage\"] = ZERO\n account_data[\"latest_price_date\"] = None\n portfolio_tree[\"asset_classes\"][asset_class][\"asset_subclasses\"][\n asset_subclass\n ][\"accounts\"][account_name] = account_data\n else:\n errors.append(\n \"account \"\n + account_name\n + \" has balances not in operating currency \"\n + self.operating_currency\n )\n\n # Now that account balances and totals are calculated,\n # Traverse and calculate portfolio-level info.\n for asset_class in portfolio_tree[\"asset_classes\"]:\n asset_class_dict = portfolio_tree[\"asset_classes\"][asset_class]\n\n asset_class_dict[\"portfolio_allocation\"] = (\n ZERO\n if portfolio_tree[\"portfolio_total\"] == ZERO\n else round(\n (\n asset_class_dict[\"asset_class_total\"]\n / portfolio_tree[\"portfolio_total\"]\n )\n * 100,\n 2,\n )\n )\n\n for asset_subclass in asset_class_dict[\"asset_subclasses\"]:\n asset_subclass_dict = asset_class_dict[\"asset_subclasses\"][\n asset_subclass\n ]\n\n asset_subclass_dict[\"portfolio_allocation\"] = (\n ZERO\n if portfolio_tree[\"portfolio_total\"] == ZERO\n else round(\n (\n asset_subclass_dict[\"asset_subclass_total\"]\n / portfolio_tree[\"portfolio_total\"]\n )\n * 100,\n 2,\n )\n )\n\n asset_subclass_dict[\"asset_class_allocation\"] = (\n ZERO\n if asset_class_dict[\"asset_class_total\"] == ZERO\n else round(\n (\n asset_subclass_dict[\"asset_subclass_total\"]\n / asset_class_dict[\"asset_class_total\"]\n )\n * 
100,\n 2,\n )\n )\n\n for account in asset_subclass_dict[\"accounts\"]:\n account_dict = asset_subclass_dict[\"accounts\"][account]\n\n account_dict[\"portfolio_allocation\"] = (\n ZERO\n if portfolio_tree[\"portfolio_total\"] == ZERO\n else round(\n (\n account_dict[\"balance_market_value\"]\n / portfolio_tree[\"portfolio_total\"]\n )\n * 100,\n 2,\n )\n )\n\n account_dict[\"asset_class_allocation\"] = (\n ZERO\n if asset_class_dict[\"asset_class_total\"] == ZERO\n else round(\n (\n account_dict[\"balance_market_value\"]\n / asset_class_dict[\"asset_class_total\"]\n )\n * 100,\n 2,\n )\n )\n\n account_dict[\"asset_subclass_allocation\"] = (\n ZERO\n if asset_subclass_dict[\"asset_subclass_total\"] == ZERO\n else round(\n (\n account_dict[\"balance_market_value\"]\n / asset_subclass_dict[\"asset_subclass_total\"]\n )\n * 100,\n 2,\n )\n )\n\n return portfolio_tree, types, errors", "def create_portfolio(port: str) -> Result:\n try:\n message = Portfolio.new(port)\n except AppError as e:\n return Result(success=False, message=\"create_portfolio: \"+str(e), severity=e.severity)\n else:\n return Result(success=True, message=message)", "def update_portfolio(self):\n\n try:\n r = self.session.get('http://www.marketwatch.com/game/%s/portfolio/holdings?name=null' % self.game)\n soup = BeautifulSoup(r.text, 'lxml')\n cubby_worth = soup.find('ul', {'class': 'cubby worth'})\n cubby_performance = soup.find('ul', {'class': 'cubby performance'})\n self.buyingpower = float(cubby_worth.find('span', {'class': 'data'}).getText()[1:].replace(',',''))\n self.networth = float(cubby_performance.find('span', {'class': 'data'}).getText()[1:].replace(',',''))\n except Exception, e:\n print \"ERROR in update_portfolio :: %s\" % e\n sleep(1)\n return self.update_portfolio()", "def rebalance_portfolio(self, signals):\n available_balance = self.current_portfolio['bitmex-BTC-available-balance']\n exchange = 'bitmex'\n new_order_events = []\n cancel_orders_events = []\n events = []\n default_position_size = self.default_position_size\n\n for sig in signals.events:\n sig.print_signal()\n price = self.data.get_latest_bar_value('bitmex', sig.symbol, \"close\")\n if not price:\n # Might want to throw an error here\n continue\n\n if sig.signal_type == \"EXIT\":\n cancel_open_orders = OrderEvent(exchange, sig.symbol, 'CancelAll')\n close_position_order = OrderEvent(exchange, sig.symbol, 'ClosePosition')\n cancel_orders_events.append(cancel_open_orders)\n new_order_events.append(close_position_order)\n else:\n direction = { 'LONG': 1, 'SHORT': -1 }[sig.signal_type]\n target_allocation = direction * default_position_size * sig.strength\n current_quantity = self.current_portfolio['bitmex-{}'.format(sig.symbol)]\n target_quantity = floor(target_allocation / price)\n side = 'buy' if (target_quantity - current_quantity) > 0 else 'sell'\n quantity = abs(target_quantity - current_quantity)\n\n print('TARGET ALLOCATION: {}'.format(target_allocation))\n print('PRICE: {}'.format(price))\n print('CURRENT QUANTITY: {}'.format(current_quantity))\n print('POSITION QUANTITY: {} for {}'.format(target_quantity, sig.symbol))\n\n if (target_allocation > available_balance):\n # Might want to throw an error here\n continue\n\n if (quantity == 0):\n # Might want to throw an error here\n continue\n\n order = OrderEvent(exchange, sig.symbol, 'Market', quantity, side, 1)\n precision = get_precision(sig.symbol)\n\n if side == 'buy':\n other_side = 'sell'\n stop_loss_px = truncate((1 - self.stop_loss_gap) * price, precision)\n take_profit_px = 
truncate((1 + self.take_profit_gap) * price, precision)\n elif side == 'sell':\n other_side = 'buy'\n stop_loss_px = truncate((1 + self.stop_loss_gap) * price, precision)\n take_profit_px = truncate((1 - self.take_profit_gap) * price, precision)\n\n stop_loss = OrderEvent(exchange, sig.symbol, 'StopLoss', quantity, other_side, 1, stop_loss_px)\n take_profit = OrderEvent(exchange, sig.symbol, 'TakeProfit', quantity, other_side, 1, take_profit_px)\n cancel_other_orders = OrderEvent(exchange, sig.symbol, 'CancelAll')\n\n new_order_events += [order, stop_loss, take_profit]\n cancel_orders_events.append(cancel_other_orders)\n\n events = cancel_orders_events + new_order_events\n return events", "def _portfolio_data(self, nodes):\n operating_currency = self.ledger.options[\"operating_currency\"][0]\n acct_type = (\"account\", str(str))\n bal_type = (\"balance\", str(Decimal))\n alloc_type = (\"allocation\", str(Decimal))\n types = [acct_type, bal_type, alloc_type]\n\n rows = []\n portfolio_total = Decimal()\n for node in nodes:\n row = {}\n row[\"account\"] = node.name\n balance = cost_or_value(node.balance)\n if operating_currency in balance:\n balance_dec = balance[operating_currency]\n portfolio_total += balance_dec\n row[\"balance\"] = balance_dec\n rows.append(row)\n\n for row in rows:\n if \"balance\" in row:\n row[\"allocation\"] = round(\n (row[\"balance\"] / portfolio_total) * 100, 2\n )\n\n return types, rows" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a portfolio is valid.
def valid_portfolio(portfolio: dict) -> bool:
    ratio_sum = sum([stock["GoalRatio"] for stock in portfolio["Stocks"]])
    if abs(1.0 - ratio_sum) > 1e-4:
        print(f"Goal ratios of stocks sum up to {ratio_sum} instead of 1.0")
        return False
    if any(
        [
            stock["Price"] is None or stock["Price"] == 0.0
            for stock in portfolio["Stocks"]
        ]
    ):
        print("Some stocks are missing price information")
        return False
    return True
[ "def enough_assets(portfolio, log=True):\n\n nb_assets = len(portfolio.values[\"2016-06-01\"])\n\n if nb_assets > 15 and nb_assets < 40:\n if log:\n print(\"La quantité d'action pour ce portfolio est correcte\")\n return True\n\n if log:\n print(\"La quantité d'action pour ce portfolio n'est PAS correcte\")\n return False", "def check_actions(portfolio, log=True):\n\n total_count = 0\n action_count = 0\n for asset_date in portfolio.values:\n for elt in portfolio.values[asset_date]:\n total_count += int(elt.asset.quantity)\n\n str_asset = get_asset(elt.asset.asset, \"TYPE\", asset_date)\n json_asset = json.loads(str_asset)\n if json_asset[\"TYPE\"][\"value\"] == \"STOCK\":\n action_count += elt.asset.quantity\n\n if total_count == 0 or action_count / total_count < 0.5:\n if log:\n print(\"Il n'y a PAS au moins 50% d'action dans ce portefeuille\")\n return False\n\n if log:\n print(\"Il y a au moins 50% d'action dans ce portefeuille\")\n return True", "def check_nav(portfolio, log=True):\n\n my_sum, my_assets = sum_nav(portfolio)\n # print(my_sum, my_assets)\n for asset in my_assets:\n num = 0\n for nav, quantity in my_assets[asset]:\n num += float(nav) * float(quantity)\n\n pourcent = (num * 100) / my_sum\n if 1 > pourcent or pourcent > 10:\n if log:\n print(\"Le portefeuille ne respecte PAS la condition des navs\")\n return False\n\n if log:\n print(\"Le portefeuille respecte la condition des navs\")\n return True", "def is_valid(self):\n\n return all([project.is_valid() for project in self.projects])", "def frictionless_validate(self):\n if validate(self.datapackage_path).valid == True:\n return True\n else:\n print(\n \"\\nFrictionless has detected that this is an invalid package with errors %s\"\n % validate(self.datapackage_path).errors\n )\n return False", "def validate_profitability(profitability):\n if profitability == 'Bad':\n return False\n return True", "def check_contents(df):\n \n if len(df) == 0:\n print(\n \"Portfolio {} in scope {} contains no cash on {:%Y-%m-%d}\".format(\n portfolio,scope,start_date)\n )", "def set_fund_portfolio(self,\n fund_id: str,\n portfolio: FundPortfolio) -> bool:\n try:\n self.client.hset('fund_portfolio', fund_id, str(portfolio))\n return True\n except Exception as e:\n print(f'Error setting fund portfolio for \"{fund_id}\" from Redis: {traceback.format_exc()}')\n return False", "def has_portfolio_applications(_user, portfolio=None, **_kwargs):\n if portfolio and portfolio.applications:\n return True", "def is_valid_money(money):\r\n global game_over\r\n if money == None:\r\n game_over = True\r\n print(\"Input money file not found\")\r\n return False\r\n else:\r\n try:\r\n money = float(money)\r\n except (ValueError,TypeError):\r\n game_over = True\r\n print(\"Invalid money in the input file!\")\r\n print(\"Data cannot be converted to Money!!\")\r\n #print(f\"Content: {money}, type(content): {type(money)}\")\r\n return False\r\n except Exception as e:\r\n print(\"Unexpected error occured while validating money. 
Terminating game!\")\r\n print(type(e),e)\r\n sys.exit()\r\n return True", "def _validate_fp_settings(self):\n valid = True\n if self.fit:\n self.config.validate()\n else:\n log.info(\"No results available from fit.\")\n valid = False\n if \"flux-points\" not in self.settings:\n log.info(\"No values declared for the energy bins.\")\n valid = False\n elif \"fp_binning\" not in self.settings[\"flux-points\"]:\n log.info(\"No values declared for the energy bins.\")\n valid = False\n if not valid:\n log.info(\"Flux points calculation cannot be done.\")\n return valid", "def _validate_project(obj):\n projects = ConductorDataBlock().projects()\n project_att = obj.get_attribute(\"conductor_project_name\")\n label = project_att.get_applied_preset_label()\n if label == PROJECT_NOT_SET[\"name\"]:\n ix.log_error('Project is not set for \"{}\".'.format(obj.get_name()))\n try:\n next(p for p in projects if str(p[\"name\"]) == label)\n except StopIteration:\n ix.log_error(\n 'Cannot find project \"{}\" at Conductor. \\\n Please ensure the PROJECT dropdown contains a \\\n valid project.'.format(\n label\n )\n )", "def assess_portfolio(sd = dt.datetime(2008,1,1), ed = dt.datetime(2009,1,1), \\\n syms = [\"GOOG\",\"AAPL\",\"GLD\",\"XOM\"], \\\n allocs=[0.1,0.2,0.3,0.4], \\\n sv=1000000, rfr=0.0, sf=252.0, \\\n gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all[\"SPY\"] # only SPY, for comparison later\n\n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, sv)\n\n # Get portfolio statistics (sddr == volatility)\n cr, adr, sddr, sr = get_portfolio_stats(port_val, rfr, sf)\n\n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # Create a temporary dataframe with both the SPY and Portfolio\n df_temp = pd.concat([port_val, prices_SPY], keys=[\"Portfolio\", \"SPY\"],\n axis=1)\n plot_normalized_data(df_temp, title=\"Daily portfolio and SPY\", \n xlabel=\"Date\", ylabel=\"Normalized price\") \n\n # Compute end value\n ev = port_val.ix[-1, 0]\n\n return cr, adr, sddr, sr, ev", "def _valid(self, L: Schedule, i: int, v: Course):\n return (not L.layer_is_full(i, v.units)) and ((i + self.start_q) % self.total_quarter_codes) in v.quarterCodes", "def _validate_project_exists(self):\n odooclient = odoo_client.get_odoo_client()\n try:\n search = [['tenant_id', '=', self.project_id]]\n project = odooclient.projects.list(search)[0]\n self.odoo_project = project\n self.odoo_project_id = project.id\n self.odoo_project_name = project.name\n self.add_note('Odoo project %s (%s) exists.'\n % (self.odoo_project_name, self.odoo_project_id))\n return True\n except IndexError:\n self.add_note('Project %s does not exist in odoo'\n % self.project_id)\n return False", "def create_portfolio(port: str) -> Result:\n try:\n message = Portfolio.new(port)\n except AppError as e:\n return Result(success=False, message=\"create_portfolio: \"+str(e), severity=e.severity)\n else:\n return Result(success=True, message=message)", "def input_valid(self, settings_to_test):\n return (True, \"ok\")\n #return (False, \"All arguments are assumed invalid until verified\")", "def check_file_validation(self):\r\n if self.snap is None:\r\n # file existence\r\n print('file for stock %s at date %s is not existed' % (self.code, self.date))\r\n return False\r\n elif self.snap.iloc[-1]['iTurnover'] == 
0:\r\n # stock is traded or not\r\n print('stock %s has no trade record at date %s' % (self.code, self.date))\r\n return False\r\n else:\r\n return True", "def isValidSubmission():\n global scriptDialog\n\n errors = \"\"\n warnings = \"\"\n\n # Check if SU file exists\n sceneFile = scriptDialog.GetValue( \"SceneBox\" )\n if not os.path.isfile( sceneFile ):\n errors += 'SketchUp file \"%s\" does not exist.\\n\\n' % sceneFile\n elif PathUtils.IsPathLocal( sceneFile ) and not scriptDialog.GetValue( \"SubmitSceneBox\" ):\n warnings += 'SketchUp file \"%s\" is local.\\n\\n' % sceneFile\n\n # Check Output\n exportDirectory = scriptDialog.GetValue( \"ExportDirectoryBox\" ).strip()\n if not exportDirectory:\n errors += \"An output directory was not specific.\\n\\n\"\n elif not os.path.isdir( exportDirectory ):\n errors += 'The directory of the output file does not exist: \"%s\"\\n\\n' % exportDirectory\n\n isVray = scriptDialog.GetEnabled( \"VrayBox\" ) and scriptDialog.GetValue( \"VrayBox\" )\n vrayVersion = int( scriptDialog.GetValue( \"VrayVersionBox\" ) )\n vrayFrames = scriptDialog.GetValue( \"VrayFramesBox\" ).strip()\n # Check if a valid frame range has been specified for V-Ray 3 or later\n if isVray and vrayVersion >= 3 and is2dAnimation() and not FrameUtils.FrameRangeValid( vrayFrames ):\n errors += 'Frame range \"%s\" is not valid.\\n\\n' % vrayFrames\n\n if errors:\n scriptDialog.ShowMessageBox( \"The following errors occurred, you must fix these before continuing.\\n\\n%s\" % errors.strip(), \"Error\" )\n return False\n elif warnings:\n result = scriptDialog.ShowMessageBox( \"The following warnings occurred, are you sure you want to continue?\\n\\n%s\" % warnings.strip(), \"Warning\", ( \"Yes\", \"No\" ) )\n if result == \"No\":\n return False\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjust the number of new stocks to the target investment value.
def _adjust_new_stocks_to_target(portfolio: dict, portf_goal_val: float):
    # Compute current total value (including reinvest)
    portf_total_val = _calc_total_val(portfolio)
    # Get sorted list of DeltaRatio for all stocks
    ascending_ppp = sorted(portfolio["Stocks"], key=lambda x: x["DeltaRatio"])
    if portf_total_val > portf_goal_val:
        # Need to round down some stock, starting with those least affecting the ratio
        for stock in ascending_ppp:
            stock["NewShares"] -= 1
            portf_total_val -= stock["Price"]
            if portf_total_val < portf_goal_val:
                break
    else:
        # Need to round up some stock, starting with those least affecting the ratio
        for stock in ascending_ppp:
            stock["NewShares"] += 1
            portf_total_val += stock["Price"]
            if portf_total_val > portf_goal_val:
                # Undo last step
                stock["NewShares"] -= 1
                portf_total_val -= stock["Price"]
    _eval_rebalanced_ratio(portfolio, portf_total_val)
[ "def update(self, target):\n change = (self.coeff * (target - self.price) +\n self.momentum * self.last_change)\n self.last_change = change\n \n limiter = self.buyer and min or max\n self.price = int(limiter(self.price + change, self.limit))", "def increment_stock(self, q):\n self.__stock += q", "def update_stockcounter(self, stock):\n\n bg = stock.get_mw_price()\n self.update_portfolio()\n stock.counter = int(float(self.buyingpower / bg / stock.tradeshares))\n print \" --- Updated Net Worth: %s | Buying Power: %s ---\" % (self.networth, self.buyingpower)", "def refill_stocks(self):\n self.edit_stocks(**self._max_stocks)", "def change_config(self, new_config):\n with transaction.atomic():\n self._sell_all()\n for conf in new_config:\n stock = Stock.objects.get(id=conf.id)\n quote = stock.latest_quote()\n self.available -= quote.value * conf.quantity\n self.stocks.create(\n stock=stock,\n quantity=conf.quantity,\n start=datetime.datetime.now() - datetime.timedelta(\n days=31), )\n if self.available < 0:\n raise Exception(\"Not enough money available\")\n self.save()", "def change_product_qty(self):\n Inventory = self.env['stock.inventory']\n consumption_obj = self.env['consumption.record']\n for wizard in self:\n product = wizard.product_id.with_context(location=wizard.location_id.id, lot_id=wizard.lot_id.id)\n line_data = wizard._prepare_inventory_line()\n\n if wizard.product_id.id and wizard.lot_id.id:\n inventory_filter = 'none'\n elif wizard.product_id.id:\n inventory_filter = 'product'\n else:\n inventory_filter = 'none'\n\n date_obj = datetime.strptime(wizard.date, DATE_FORMAT)\n date = date_obj.strftime(\"%Y-%m-%d 00:00:00\")\n staff_ids = []\n for line in wizard.staff_ids:\n staff_ids.append(line.id)\n inventory = Inventory.create({\n 'name': _('INV: %s') % tools.ustr(wizard.product_id.name),\n 'filter': inventory_filter,\n 'product_id': wizard.product_id.id,\n 'location_id': wizard.location_id.id,\n 'lot_id': wizard.lot_id.id,\n 'date': date,\n 'line_ids': [(0, 0, line_data)],\n })\n inventory.action_done()\n for i in inventory.move_ids:\n i.consumed = True\n consumption_obj.create({\n 'name': self.env['ir.sequence'].next_by_code('consumption.record'),\n 'product_id': wizard.product_id.id,\n 'product_tmpl_id': wizard.product_tmpl_id.id,\n 'product_variant_count': wizard.product_variant_count,\n 'new_quantity': wizard.new_quantity,\n 'lot_id': wizard.lot_id.id,\n 'location_id': wizard.location_id.id,\n 'barcode': wizard.barcode,\n 'staff_ids': [(6, 0, staff_ids)],\n 'user_id': self.env.uid,\n 'note': wizard.note,\n 'date': wizard.date,\n 'inventory_id': inventory.id\n })\n return {'type': 'ir.actions.act_window_close'}", "def test_update_depends_stock(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Bar'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)", "def adjust_burn(self, new_burn):\n self.demand_per_unit_map_dn_um.array = new_burn\n self.recalc()", "def edit_stocks(self, **stocks):\n prev_stocks = copy.copy(self.stocks)\n for type_ in Machine.StocksType:\n try:\n new_val = stocks[type_]\n except KeyError:\n pass\n else:\n if self.stocks[type_] < new_val <= self.max_stocks[type_]:\n self.stocks[type_] = new_val\n self._log.append(StockLog(prev_stocks, self.stocks))", "def give_raise(self, amount=5000):\n self.salary += amount", "def test_patch_investment_value(self):\n pass", "def increase(self, 
additional_bet):\n self._amount += additional_bet", "def update_profits(self, growth):\n self.profit = int(self.profit * growth)\n self.profit_history.append(self.profit)", "def test_stock_with_user_setting_amount(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', 1, 0)\n\n m.step()\n self.assertEqual(Foo[''], 1)\n Foo[''] = 10\n self.assertEqual(Foo[''], 10)\n m.step()\n self.assertEqual(Foo[''], 11)\n m.reset()\n m.step()\n self.assertEqual(Foo[''], 1)\n Foo[''] = 7\n m.reset(reset_external_vars=False)\n self.assertEqual(Foo[''], 0)", "def change_price_precent(self):\n stock_firstday = self.closeprice[0]\n self.dataframe['stock_%chg'] = (self.closeprice - stock_firstday)/stock_firstday\n change_price_precent = self.dataframe['stock_%chg']\n return change_price_precent", "def upgrade(self, amount):\n if self.maxvalue + amount < 1:\n amount = 1 - self.maxvalue\n if self.maxvalue + amount > MAX_STAT_VALUE:\n amount = MAX_STAT_VALUE - amount\n self.maxvalue += amount\n self.value += amount", "def give_raise(self, amount=5000):\n self.salary += float(amount)", "def update_quantity(self, company: Company, quantity: int):\n pass", "def updateInventory(order_food, stock):\n stock[7]=int(stock[7])-order_food[\"nBurgers\"]\n stock[8]=int(stock[8])-order_food[\"nLettuce\"]\n stock[9]=int(stock[9])-order_food[\"nTomato\"]\n stock[10]=int(stock[10])-order_food[\"nVeggie\"]\n stock[11]=int(stock[11])-order_food[\"nBacon\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate the rebalanced ratio of stocks in a portfolio.
def _eval_rebalanced_ratio(portfolio: dict, portf_total_val: float):
    for stock in portfolio["Stocks"]:
        stock["RebalancedRatio"] = (
            (stock["Shares"] + stock["NewShares"]) * stock["Price"]
        ) / portf_total_val
[ "def calculate_portfolio_return(self, price_df: pd.DataFrame) -> None:\n # Keep only data of stocks in the portfolio\n select_query = ' or '.join(f\"symbol == '{val[1]}'\" for val in self.stocks)\n self.price_df = price_df.query(select_query) \n # Calculate returns\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.expected_daily_return = self.portfolio_daily_returns.mean()\n self.volatility = self.portfolio_daily_returns.std()", "def test_stock_price_earnings_ratio(self):\n\n # Make a mock object for testing.\n sALE = Stock('ALE', 'common', 23, nan, 60)\n # Add some mock Trades.\n sALE.buy(500, 25)\n sALE.sell(300, 15)\n self.assertEqual(len(sALE._trades), 2)\n # Make a mock object for testing.\n sGIN = Stock('GIN', 'preferred', 8, 0.02, 100)\n # Add some mock Trades.\n sGIN.buy(320, 95)\n sGIN.sell(180, 110)\n self.assertEqual(len(sGIN._trades), 2)\n\n # `ALE` stock should use the last_dividend as dividend\n self.assertEqual(sALE.price_earnings_ratio(),\n ((500*25+300*15)/(500+300)) / 23.)\n\n # But `GIN` stock should the fixed_dividend * par_value as dividend\n self.assertEqual(sGIN.price_earnings_ratio(),\n ((320*95+180*110)/(320+180)) / (0.02 * 100))", "def _calc_current_val(portfolio: dict) -> float:\n return sum([stock[\"Shares\"] * stock[\"Price\"] for stock in portfolio[\"Stocks\"]])", "def _calc_total_val(portfolio: dict) -> float:\n if \"NewShares\" in portfolio[\"Stocks\"][0]:\n return _calc_current_val(portfolio) + _calc_reinvest_val(portfolio)\n\n return _calc_current_val(portfolio)", "def calculate_expected_beta(self, spy_df: pd.DataFrame) -> None:\n df = pd.merge(pd.DataFrame(self.portfolio_daily_returns), spy_df, on = 'date', how = 'inner')\n self.expected_beta = df['weighted_ret'].cov(df['spy_dailyret']) / df['spy_dailyret'].var()", "def equity_risk_premium(self):\n market = yf.Ticker(self.market_name)\n market_data = market.history(period=self.per)\n expected_return = (market_data['Close'][-1] - market_data['Close'][0]) / market_data['Close'][0]\n\n risk_free_rate = yf.Ticker(\"^IRX\")\n risk_free_rate = risk_free_rate.history(period=\"today\")\n erp = expected_return - float(risk_free_rate['Open'])\n return erp, expected_return", "def get_portfolio_sharpe_ratio(portfolio_data, index_data):\n # calculate excess returns\n portfolio_returns, index_returns = get_excess_returns(portfolio_data, index_data)\n # calculate yearly returns for index\n alpha = get_portfolio_alpha(portfolio_data, index_data)\n # caclculate standard deviation of portfolio return\n std_portfolio_return = np.std(portfolio_returns)\n # return sharpe ratio\n return alpha / std_portfolio_return", "def assess_portfolio(\n sd = dt.datetime(2008,1,1), ed = dt.datetime(2009,1,1), \\\n syms = ['GOOG','AAPL','GLD','XOM'], \\\n allocs=[0.1,0.2,0.3,0.4], \\\n sv=1000000, rfr=0.0, sf=252.0, \\\n gen_plot=False):\n \n # read in adjusted closing prices for given symbols, date range\n # adding SPY to allocation for calulations and trading days\n dates = pd.date_range(sd.date(), ed.date())\n df_all = get_data(syms, dates) # automatically adds SPY\n df = df_all[syms] \n \n # get daily portfolio value \n df_nrm = df / df.ix[0,:] \n allocated = df_nrm * allocs\n position_values = allocated * sv\n port_value = position_values.sum(axis = 1)\n # daily returns (y_{t} = x_{t}/x_{t-1} - 1\n d_returns = port_value.copy() \n d_returns = (port_value/port_value.shift(1) - 1)\n 
d_returns = d_returns[1:]\n \n # Below are desired output values\n \n # cumulative return (final - initial) - 1\n cr = port_value[-1] / port_value[0] - 1\n # average daily return\n adr = d_returns.mean()\n # standard deviation of daily return\n sddr = d_returns.std()\n # sharpe ratio ((Mean - Risk free rate)/Std_dev)\n daily_rfr = (1.0 - rfr)**(1/252) - 1 #Should this be sampling freq instead of 252? \n sr = (d_returns - daily_rfr).mean() / sddr\n sr_annualized = sr * (sf**0.5)\n \n # compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n df_nrm_SPY = df_all['SPY'] / df_all['SPY'].ix[0,:]\n \n port_value_norm = port_value / port_value.ix[0,:]\n port_vs_SPY = df_nrm_SPY.copy()\n port_vs_SPY = port_vs_SPY.to_frame().join(port_value_norm.to_frame('Portfolio'))\n \n ax_portfolio = port_vs_SPY.plot(title = 'Daily Returns against SPY', grid = True, legend = 'reverse')\n ax_portfolio.set_xlabel('Date')\n ax_portfolio.set_ylabel('Normalized Price')\n plt.show()\n \n # end value\n ev = port_value[-1]\n \n return cr, adr, sddr, sr_annualized, ev", "def calculate_beta(stock, ind, full_stock):\n # path = os.path.join(os.getcwd(), \"Data\")\n\n stock[\"% Return of Company\"] = (\n (full_stock[\"Close Price\"] / full_stock['Close Price'].shift(-1))-1)*100\n\n full_stock[\"% Return of Company\"] = (\n (full_stock[\"Close Price\"] / full_stock['Close Price'].shift(-1))-1)*100\n\n ind[\"Date\"] = pd.to_datetime(ind[\"Date\"])\n stock[\"Date\"] = pd.to_datetime(stock[\"Date\"])\n\n s = full_stock.Date.head(1).values[0]\n e = full_stock.Date.tail(1).values[0]\n ind = ind[ind.Date.between(e, s)]\n ind = ind.iloc[::-1]\n ind.rename(columns={'Close': 'Close Price of SP500',\n '% Return': '% Return of SP500'}, inplace=True)\n ind.drop(['Open', 'High', 'Low', '% YTD'], axis=1, inplace=True)\n ind[\"Date\"] = pd.to_datetime(ind[\"Date\"])\n inddf = ind.copy()\n stock = stock.set_index(\"Date\")\n inddf = inddf.set_index(\"Date\")\n full_stock = full_stock.set_index(\"Date\")\n for date, row in stock.iterrows():\n try:\n stock.loc[date, 'Close Price of SP500'] = inddf.loc[date,\n 'Close Price of SP500']\n stock.loc[date, '% Return of SP500'] = inddf.loc[date,\n '% Return of SP500']\n except:\n pass\n stock = stock.reset_index()\n full_stock = full_stock.reset_index()\n inddf = inddf.reset_index()\n sp500 = inddf[\"% Return of SP500\"]\n company = full_stock[\"% Return of Company\"]\n results = list()\n for i in range(stock.shape[0]):\n # cov = np.cov(company[i:],sp500[i:])[0][1]\n cov = np.ma.cov(np.ma.masked_invalid(\n np.array(company[i:], sp500[i:-1])), rowvar=False)\n var = np.nanvar(sp500[i:-1])\n res = var/cov\n results.append(res)\n stock[\"Beta\"] = results\n return stock", "def sharpe_ratio(allocs, normed):\n alloced = normed*allocs\n port_val = alloced.sum(axis=1) #gets total normalized returns for the portfolio as a whole\n daily_returns = compute_daily_returns(port_val)\n sddr = daily_returns.std()\n sr = ((daily_returns).mean()/sddr)*(252.**(1./2)) #computes sr\n return sr*-1 #multiply by negative 1 because we actually want to maximize sr", "def percent_price_reduction(change):\n \n upcoming_price_changes(change)\n\n # TODO do you wish to continue?\n\n sql_update = \"\"\"\n update `tabItem Price` ip\n \n left join `tabItem` it\n on ip.item_code = it.item_code\n \n set ip.price_list_rate = ip.price_list_rate + (ip.price_list_rate * %s / 100.0)\n\n where ip.selling = 1\n and it.ebay_id REGEXP '[0-9]'\n \n and it.modified < now() - interval 10 day\n \n and ((it.standard_rate >25 
and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change)\n\n frappe.db.sql(sql_update, auto_commit=True)\n \n sql_update_it = \"\"\"\n update `tabItem` it\n\n set \n it.standard_rate = it.standard_rate + (it.standard_rate * %s / 100.0),\n it.vat_inclusive_price = it.vat_inclusive_price + (it.vat_inclusive_price * %s / 100.0)\n \n where \n it.ebay_id REGEXP '[0-9]'\n and it.modified < now() - interval 30 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change, change)\n\n frappe.db.sql(sql_update_it, auto_commit=True)\n\n print(\"Price reduction completed\")", "def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n r = portfolio_returns(weights, er)\n vol = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/vol", "def _calculate_profit(self):\n return self.calculate_worth() - self.initial_value", "def pct_change():\n original_value = bank_of_rick.original_value\n current_total_value = sum(total_value())\n return 100 * (current_total_value - original_value) / original_value", "def compute_revenue(c):\n money_per_sell = 5.\n CPC = 0.5\n return c[1,1]*money_per_sell-(c[0,1]+c[1,1])*CPC", "def assess_portfolio(sd = dt.datetime(2008,1,1), ed = dt.datetime(2009,1,1), \\\n syms = [\"GOOG\",\"AAPL\",\"GLD\",\"XOM\"], \\\n allocs=[0.1,0.2,0.3,0.4], \\\n sv=1000000, rfr=0.0, sf=252.0, \\\n gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all[\"SPY\"] # only SPY, for comparison later\n\n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, sv)\n\n # Get portfolio statistics (sddr == volatility)\n cr, adr, sddr, sr = get_portfolio_stats(port_val, rfr, sf)\n\n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # Create a temporary dataframe with both the SPY and Portfolio\n df_temp = pd.concat([port_val, prices_SPY], keys=[\"Portfolio\", \"SPY\"],\n axis=1)\n plot_normalized_data(df_temp, title=\"Daily portfolio and SPY\", \n xlabel=\"Date\", ylabel=\"Normalized price\") \n\n # Compute end value\n ev = port_val.ix[-1, 0]\n\n return cr, adr, sddr, sr, ev", "def portfolio_risk_and_return_and_rho():\n sum1 = 0\n sum2 = 0\n sum3 = 0\n\n data = get_daily_returns_covmatrix()\n daily_return = data[0]\n covmatrix = data[2]\n daily_risk = data[1]\n for i in range(len(portfolio_list)):\n for j in range(len(portfolio_list)):\n if i == j:\n sum1 += float(weights[i]) * float(weights[j]) * np.square(covmatrix[i,j])\n else:\n sum2 += float(weights[i]) * float(weights[j]) * covmatrix[i,j]\n sum3 += weights[i] * weights[j] * daily_risk[i] * daily_risk[j]\n sum = sum1 + sum2\n portfolio_risk = np.sqrt(sum * 252)\n \n risk_daily = np.square(portfolio_risk)/252\n rho = 0\n if len(portfolio_list) != 1:\n rho = (risk_daily - sum1) / sum3\n else:\n rho = 0\n\n mean_daily_returns = np.mean(daily_return)\n portfolio_return = np.sum(mean_daily_returns * np.asarray(weights)) * 252\n\n return (portfolio_risk,portfolio_return,rho)", "def 
get_revenue(self):\n\t\treturn self.get_income() - self.get_sum_costs()", "def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate current portfolio value.
def _calc_current_val(portfolio: dict) -> float: return sum([stock["Shares"] * stock["Price"] for stock in portfolio["Stocks"]])
[ "def _calc_total_val(portfolio: dict) -> float:\n if \"NewShares\" in portfolio[\"Stocks\"][0]:\n return _calc_current_val(portfolio) + _calc_reinvest_val(portfolio)\n\n return _calc_current_val(portfolio)", "def current_values(self):\n\t\t# remove duplicate tickers\n\t\tsymbs = list(set(np.array(self.portfolio['Ticker'])))\n\n\t\tdf_curr = get_current_prices(symbs)\n\t\tsymbs_prices = np.array(get_current_prices(symbs))\n\t\t\n\t\t# update portfolio with duplicates\n\t\tfor symb, symb_price in zip(symbs, symbs_prices):\n\t\t\twhere_same = np.where(self.portfolio[\"Ticker\"]==symb)[0]\n\t\t\tself.portfolio.loc[where_same, \"CurrentPrice\"] = symb_price\n\n\t\tself.current_net_value = np.dot(self.portfolio['CurrentPrice'], self.portfolio['NumShares'])\n\n\t\t## Portfolio without duplicate buys\n\t\tportfolio_reduced = self.portfolio[['Ticker','NumShares','CurrentPrice']]\n\t\tportfolio_reduced = portfolio_reduced.groupby('Ticker').agg({ 'NumShares':np.sum, 'CurrentPrice': 'first'}).reset_index()\n\t\tself.portfolio_reduced = portfolio_reduced", "def projected_market_value(self, current_prices: dict) -> dict:\n\n projected_value = {}\n total_value = 0.0\n total_invested_capital = 0.0\n total_profit_or_loss = 0.0\n\n position_count_profitable = 0\n position_count_not_profitable = 0\n position_count_break_even = 0\n\n for symbol in current_prices:\n\n if self.in_portfolio(symbol=symbol):\n\n projected_value[symbol] = {}\n current_quantity = self.positions[symbol]['quantity']\n purchase_price = self.positions[symbol]['purchase_price']\n current_price = current_prices[symbol]['lastPrice']\n is_profitable = self.is_profitable(\n symbol=symbol, current_price=current_price)\n\n projected_value[symbol]['purchase_price'] = purchase_price\n projected_value[symbol]['current_price'] = current_prices[symbol]['lastPrice']\n projected_value[symbol]['quantity'] = current_quantity\n projected_value[symbol]['is_profitable'] = is_profitable\n\n # Calculate total market value.\n projected_value[symbol]['total_market_value'] = (\n current_price * current_quantity\n )\n\n # Calculate total invested capital.\n projected_value[symbol]['total_invested_capital'] = (\n current_quantity * purchase_price\n )\n\n projected_value[symbol]['total_loss_or_gain_$'] = ((current_price - purchase_price) * current_quantity)\n projected_value[symbol]['total_loss_or_gain_%'] = round(((current_price - purchase_price) / purchase_price), 4)\n\n total_value += projected_value[symbol]['total_market_value']\n total_profit_or_loss += projected_value[symbol]['total_loss_or_gain_$']\n total_invested_capital += projected_value[symbol]['total_invested_capital']\n\n if projected_value[symbol]['total_loss_or_gain_$'] > 0:\n position_count_profitable += 1\n elif projected_value[symbol]['total_loss_or_gain_$'] < 0:\n position_count_not_profitable += 1\n else:\n position_count_break_even += 1\n\n projected_value['total'] = {}\n projected_value['total']['total_positions'] = len(self.positions)\n projected_value['total']['total_market_value'] = total_value\n projected_value['total']['total_invested_capital'] = total_invested_capital\n projected_value['total']['total_profit_or_loss'] = total_profit_or_loss\n projected_value['total']['number_of_profitable_positions'] = position_count_profitable\n projected_value['total']['number_of_non_profitable_positions'] = position_count_not_profitable\n projected_value['total']['number_of_breakeven_positions'] = position_count_break_even\n\n return projected_value", "def get_portfolio_value(prices, allocs, 
sv):\n\n # Normalize the prices according to the first day\n norm_prices = normalize_data(prices)\n\n # Compute prices based on the allocations\n alloc_prices = norm_prices * allocs\n\n # Calculate position values\n pos_vals = alloc_prices * sv\n\n # Get daily portfolio value\n port_val = pos_vals.sum(axis=1).to_frame()\n\n return port_val", "def _get_val(self):\n return self.stock_owned.dot(self.stock_price) + self.cash_in_hand", "def calculate_portfolio_return(self, price_df: pd.DataFrame) -> None:\n # Keep only data of stocks in the portfolio\n select_query = ' or '.join(f\"symbol == '{val[1]}'\" for val in self.stocks)\n self.price_df = price_df.query(select_query) \n # Calculate returns\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.expected_daily_return = self.portfolio_daily_returns.mean()\n self.volatility = self.portfolio_daily_returns.std()", "def __calculate_total_portfolio_val(self, df):\n result = df.sum(axis=1)\n return result", "def get_portfolio_value(self, dates):\n \n u_symb = list(self.symbols)\n for s in self.symbols:\n if self.amount[s] == 0:\n u_symb.remove(s)\n u_symb.remove('Cash')\n \n df_value = pd.DataFrame(index=dates, columns=['Value'])\n df_data = get_data(u_symb, dates)\n \n df_value['Value'] = self.amount['Cash']\n \n for s in u_symb:\n df_value['Value'] += abs(self.amount[s]) * df_data[s] \n \n df_value = df_value.dropna()\n \n return df_value", "def value(self):\n return self.shares() * self.price()", "def get_asset_value(self):\n if self.asset_owned:\n # not necessarily exact to live data, it just uses the most recent data point to calculate the value of all assets\n return self.asset_amount_per_action * self.all_asset_data_points[-1]\n else:\n return 0", "def calc_values(self):\n atm_contract_index = (\n np.abs(self.chain[\"strike\"] - self.underlying_price)\n ).idxmin()\n atm_impliedvol = self.chain.iloc[atm_contract_index][\"impvol\"]\n\n # Calculate option value for all options using ATM volatility\n self.chain[\"model_value\"] = self.chain.apply(\n lambda x: bs_price(\n x[\"right\"],\n x[\"underprice\"],\n x[\"strike\"],\n self.dte / 252,\n atm_impliedvol,\n self.risk_free_rate,\n ),\n axis=1,\n )\n self.chain[\"mid_price\"] = (self.chain[\"bid\"] + self.chain[\"ask\"]) / 2\n self.chain[\"skew_premium\"] = self.chain[\"mid_price\"] - self.chain[\"model_value\"]", "def earned_value(self): \n \n return self.apc * self.budget", "def total_value():\n cash_value = bank_of_rick.current_value\n investment_value = 0\n for ticker, investment in investments.items():\n if investment:\n investment_value += investment.current_value\n return cash_value, investment_value", "def planned_value(self):\n \n return self.ppc * self.budget", "def value_current(self):\n # get current value from Stockexchange\n #TODO: Transform to € if $\n value = self.history.iloc[-1]\n if self.info['currency'] != self.currency:\n currency = Converter(\n self.info['currency'], self.currency\n )\n value = currency.convert(value)\n\n return value", "def portfolioreturnVol(data, weight):\n # compute simple assets returns\n assets_return = data.pct_change().dropna()\n \n # compute portfolio returns\n portreturn = assets_return.dot(weight)\n \n # compute portfolio cumulative returns\n # extract the last day portfolio returns\n port_com = (1 + portreturn).cumprod() \n final_return = 1 - port_com[-1]\n \n # annu_ = assets_return.cov() * 
np.sqrt(252)\n # compute portfolio annualised volatility\n covariance = assets_return.cov()\n port_val = np.transpose(weight) @ covariance @ weight\n _annualised_vol = np.sqrt(port_val) * np.sqrt(252)\n \n return final_return, _annualised_vol", "def get_value_on_date(portfolio_data, date):\n value = float(portfolio_data['Cash'])\n for stock in portfolio_data['stocks'].keys():\n stock_data = mongo.db.stocks.find_one({\"_id\": stock})\n if stock_data is not None:\n # find value of share on date\n date_entry = filter(lambda x : x['Date'] == date, stock_data['historical_data'])\n # invalid date entered.\n if date_entry == []:\n return -1\n # multiply number of shares by share price and add to value\n value += (float(date_entry[0]['Close']) * float(portfolio_data['stocks'][stock]))\n return value", "def backtest_portfolio(self):\r\n\r\n # Set the portfolio object to have the same time period\r\n # as the positions DataFrame\r\n portfolio = pd.DataFrame(index=self.positions.index)\r\n pos_diff = self.positions.diff()\r\n\r\n # Work out the intraday profit of the difference\r\n # in open and closing prices and then determine\r\n # the daily profit by longing if an up day is predicted\r\n # and shorting if a down day is predicted\r\n portfolio['price_diff'] = self.bars['Close'] - self.bars['Open']\r\n portfolio['price_diff'][0:5] = 0.0\r\n portfolio['profit'] = self.positions[self.symbol] * portfolio['price_diff']\r\n\r\n # Generate the equity curve and percentage returns\r\n portfolio['total'] = self.initial_capital + portfolio['profit'].cumsum()\r\n portfolio['returns'] = portfolio['total'].pct_change()\r\n return portfolio", "def value(self) -> float:\n return self.days * float(self.rate)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate total value of the portfolio.
def _calc_total_val(portfolio: dict) -> float: if "NewShares" in portfolio["Stocks"][0]: return _calc_current_val(portfolio) + _calc_reinvest_val(portfolio) return _calc_current_val(portfolio)
[ "def __calculate_total_portfolio_val(self, df):\n result = df.sum(axis=1)\n return result", "def _calc_current_val(portfolio: dict) -> float:\n return sum([stock[\"Shares\"] * stock[\"Price\"] for stock in portfolio[\"Stocks\"]])", "def total(self):\n return self.sum.value", "def total_value():\n cash_value = bank_of_rick.current_value\n investment_value = 0\n for ticker, investment in investments.items():\n if investment:\n investment_value += investment.current_value\n return cash_value, investment_value", "def total(self):\n total = 0.0 \n for vinylid in self.cart:\n total += self.get_price(vinylid) \n return total", "def totalCash(self):\n return self.spent", "def calculateAmount(self) -> float:\n\n amount = 0\n for item in self.items:\n amount += item.price\n return amount", "def value(self):\n total = 0\n for value, amount in self.items():\n total += value * amount\n return total", "def get_income_sum(self):\n if len(self.account) == 0:\n return 0\n return self.account[self.account.value > 0].value.sum()", "def sum(self):\n\n return sum(self._values)", "def total_profit(self):\n self.store_profit += self.sale_profit\n return self.store_profit", "def get_total(self, num_lemonades):\n return self.price * num_lemonades", "def get_total(self):\n total = 0\n for order in self.orders:\n total += order.get_total()\n\n return total", "def calculate_portfolio_return(self, price_df: pd.DataFrame) -> None:\n # Keep only data of stocks in the portfolio\n select_query = ' or '.join(f\"symbol == '{val[1]}'\" for val in self.stocks)\n self.price_df = price_df.query(select_query) \n # Calculate returns\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.expected_daily_return = self.portfolio_daily_returns.mean()\n self.volatility = self.portfolio_daily_returns.std()", "def get_portfolio_value(self, dates):\n \n u_symb = list(self.symbols)\n for s in self.symbols:\n if self.amount[s] == 0:\n u_symb.remove(s)\n u_symb.remove('Cash')\n \n df_value = pd.DataFrame(index=dates, columns=['Value'])\n df_data = get_data(u_symb, dates)\n \n df_value['Value'] = self.amount['Cash']\n \n for s in u_symb:\n df_value['Value'] += abs(self.amount[s]) * df_data[s] \n \n df_value = df_value.dropna()\n \n return df_value", "def get_total_price(self):\n return self.pizza.get_total_price()", "def cash(self):\n return self.cents / 100", "def get_portfolio_value(prices, allocs, sv):\n\n # Normalize the prices according to the first day\n norm_prices = normalize_data(prices)\n\n # Compute prices based on the allocations\n alloc_prices = norm_prices * allocs\n\n # Calculate position values\n pos_vals = alloc_prices * sv\n\n # Get daily portfolio value\n port_val = pos_vals.sum(axis=1).to_frame()\n\n return port_val", "def value(self):\n return self.shares() * self.price()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for interface BDD complexity. Returns true if above threshold
def condition(iface: Interface) -> bool: if len(iface.pred) > maxnodes: print("Interface # nodes {} exceeds maximum {}".format(len(iface.pred), maxnodes)) return True return False
[ "def __check_constraints_feasibility(self):\n pass", "def should_hit(self):\n \n return self.hand.compute_bj_count() < 17", "def __check_objective_feasibility(self):\n pass", "def testable(self):\n return len(self.outputs) > 0", "def assert_numbers_of_calls_within_limits(\n self, die_classes: Iterable[DieOrPool]\n ) -> None:\n raise NotImplementedError", "def requires_high_test_strictness_level(self) -> bool:\n return self.ab_internal_ql >= IMPORTANT_CONNECTOR_THRESHOLDS[\"ql\"]", "def CheckBounds(self, ):\n ...", "def insurance_boundaries_defined(asset):\n\n if (asset.ins_limit >= 0 and asset.deductible >= 0):\n return True\n else:\n raise RuntimeError('Insurance boundaries for asset %s are not defined'\n % asset.asset_ref)", "def test_conformance(self):\n self._request_valid(\"conformance\")", "def check(self, inputs=None):\n limits = digio.ReadLimit(inputs=inputs)\n self.PowerOff = ('POWER' in limits)\n if not self.PowerOff:\n self.EastLim = ('EAST' in limits)\n self.WestLim = ('WEST' in limits)\n self.MeshLim = ('MESH' in limits)\n self.HorizLim = ('HORIZON' in limits)\n\n if self.EastLim or self.WestLim or self.MeshLim or self.HorizLim or self.PowerOff:\n self.HWLimit = True # The global limit flag can be set here, but only cleared\n # in detevent when it's safe (no jump/paddle motion)\n if (not self.OldLim) and (self.HWLimit):\n if self.PowerOff:\n logger.info('Telescope switched off.')\n else:\n logger.critical(\"Hardware limit reached!\")\n self.OldLim = True\n self.LimitOnTime = time.time() # Timestamp of the last time we hit a hardware limit", "def check(self):\n inrange = easydev.check_range\n inlist = easydev.check_param_in_list\n # check validity of the settings\n inlist(self.include_MSI_factor, [False, True], \"MSI\")\n inrange(self.feature_factor_threshold, 0, np.inf)\n inrange(self.MSI_factor_threshold, 0, np.inf)\n\n # all those methods are from statsmodels.stats.multitest.multipletests\n inlist(\n self.pvalue_correction_method,\n [\n \"bonferroni\",\n \"sidak\",\n \"holm-sidak\",\n \"simes-hochberg\",\n \"hommel\",\n \"fdr_bh\",\n \"fdr_tsbj\",\n \"fdr_tskby\",\n \"fdr\",\n ],\n \"pvalue correction method\",\n )\n inlist(self.equal_var_ttest, [True, False], \"equal_var_ttest\")\n inrange(self.minimum_nonna_ic50, 0, np.inf)\n inrange(self.FDR_threshold, 0, 100)\n inrange(self.pvalue_threshold, 0, np.inf)\n inrange(self.effect_threshold, 0, np.inf)\n\n # for now, if MSI is False, this cannot be a PANCAN analysis\n # but a cancer specific analysis\n if self.include_MSI_factor is False:\n assert self.analysis_type != \"PANCAN\", (\n \"If MSI factor is not included, the analysis must be cancer\"\n + \" specific (i.e., a tissue must be set.\"\n )\n\n valid_reg_meth = [\"OLS\", \"ElasticNet\", \"Lasso\", \"Ridge\"]\n inlist(self.regression_method, valid_reg_meth)\n\n inlist(self.pvalue_correction_level, [True, False])", "def breach(cur_units, limit):\n return cur_units >= limit", "def f5(_, target) -> bool:\n return target.get_hp() < 30", "def _check_attachment_point(testcase, attachment_point):\n\n host = attachment_point._get_host_for_useras_attachment()\n border_routers = list(host.border_routers.all())\n\n # The first BR is for the infrastructure links and also contains the inactive interfaces.\n infra_br = border_routers.pop(0)\n for iface in infra_br.interfaces.iterator():\n testcase.assertTrue(iface.remote_as().owner is None or not iface.link().active)\n\n # The other BRs contain up to 10 interfaces each.\n MAX_IFACES = 10\n for br in border_routers:\n # Expecting only 
active interfaces in these BRs\n testcase.assertTrue(all(interface.link().active for interface in br.interfaces.iterator()))\n c = br.interfaces.count()\n if br == border_routers[-1]: # only last one can have less than max\n testcase.assertLessEqual(c, MAX_IFACES)\n else:\n testcase.assertEqual(c, MAX_IFACES)", "def checkConfiguration(self):\n self.failUnless(self.filter.usePostiniScore)\n self.assertEquals(self.filter.postiniThreshhold, 5.0)", "def test_slice_thickness(self):\n self.assertEqual(self.cbct.thickness.passed, self.thickness_passed)", "def is_threshold_reach(self):\n return self.accumulate > self.threshold", "def IsInterfaceInRange(self, testSwitchInterface):\n return any(map(lambda r : r.IsInterfaceInRange(testSwitchInterface), self.rangeSpans))", "def check_collisions(self):\n\t\tpass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolves a (parametrized) strategy to a tuple of strategy and the new field name.
def resolve_strategy( self, strategy: FieldStrategy | ParametrizedFieldStrategy ) -> Tuple[FieldStrategy, FieldNameFunc]: if isinstance(strategy, dict): return (strategy["strategy"], self.get_name_func_from_parameters(strategy)) else: return (strategy, self.get_name_func_for_strategy(strategy))
[ "def addStrategy(self, s) -> None:\n ...", "def _set_strategy(strategy: str) -> StrategyFunction:\n if strategy == \"all_perm\":\n return create_all_permutations\n if strategy == \"step\":\n return step_values\n if strategy == \"random\":\n return random_permutations\n if callable(strategy):\n return strategy\n raise SSUnsupportedError(\n f\"Permutation strategy given is not supported: {strategy}\"\n )", "def from_name(name):\n Strategy = possible_writers[name]\n if Strategy.isfunctional():\n strategy = Strategy()\n strategy.name = name\n return strategy\n raise NonFunctionalStrategy(name, Strategy.hint)", "def update_algo_parameter(self, parameter_name, new_parameter_value):\n if hasattr(self, parameter_name):\n setattr(self, parameter_name, new_parameter_value)\n if parameter_name == \"lr\":\n for param_group in self.pi_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.q_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.alpha_optimizer.param_groups:\n param_group['lr'] = new_parameter_value", "def field(\n _resolver_or_name: Union[str, Callable] = None,\n *,\n name: str = None,\n resolver: Callable = None,\n type: type = None,\n args: Dict[str, type] = None,\n doc: str = None,\n) -> Union[Field, Callable[..., Field]]:\n\n if _resolver_or_name is None:\n # either called as a higher order decorator `@field(...)`,\n # or as a constructor with implicit name `my_field = field(type=str)`\n if name is None:\n # if called as a field, we will be able to extract varname\n try:\n name = name or varname() # type: ignore\n except Exception:\n # otherwise, varname() will raise an exception and name will still be None\n pass\n else:\n # only set resolver in the case where we successfully extracted varname\n # (and so field was called as a constructor, missing the name)\n resolver = resolver or default_resolver(name)\n elif isinstance(_resolver_or_name, str):\n # calling `field` as a constructor, use provided resolver or the default one\n if name is not None:\n raise ValueError(\"Cannot specify field name with both positional and keyword argument\")\n name = _resolver_or_name\n resolver = resolver or default_resolver(name)\n else:\n # calling `field` as a flat decorator\n if resolver is not None:\n raise ValueError(\"Cannot specify resolver with both positional and keyword argument\")\n resolver = _resolver_or_name\n\n def decorator(resolver: Callable) -> Field:\n field_name = name or resolver.__name__\n if not FIELD_NAME_PATTERN.fullmatch(field_name):\n raise ValueError(f\"Field name must match {FIELD_NAME_PATTERN.pattern}, found '{field_name}'\")\n\n field_type = type or get_type_hints(resolver).get(\"return\")\n if field_type is None:\n raise ValueError(\n f\"No return value type hint on resolver and `type` parameter not present for '{field_name}'\"\n )\n\n field_args = _extract_field_args(resolver, args)\n field_doc = doc or resolver.__doc__\n\n return Field(field_name, resolver, field_type, field_args, field_doc)\n\n return decorator if resolver is None else decorator(resolver)", "def strategy_rebuild_from_dict(data):\n from nfv_vim.strategy._strategy_phases import strategy_phase_rebuild_from_dict # noqa: F401\n\n if not data:\n return None\n\n build_phase = strategy_phase_rebuild_from_dict(data['build_phase'])\n apply_phase = strategy_phase_rebuild_from_dict(data['apply_phase'])\n abort_phase = strategy_phase_rebuild_from_dict(data['abort_phase'])\n\n if STRATEGY_NAME.SW_PATCH == data['name']:\n strategy_obj = 
object.__new__(SwPatchStrategy)\n elif STRATEGY_NAME.SW_UPGRADE == data['name']:\n strategy_obj = object.__new__(SwUpgradeStrategy)\n elif STRATEGY_NAME.SYSYTEM_CONFIG_UPDATE == data['name']:\n strategy_obj = object.__new__(SystemConfigUpdateStrategy)\n elif STRATEGY_NAME.FW_UPDATE == data['name']:\n strategy_obj = object.__new__(FwUpdateStrategy)\n elif STRATEGY_NAME.KUBE_ROOTCA_UPDATE == data['name']:\n strategy_obj = object.__new__(KubeRootcaUpdateStrategy)\n elif STRATEGY_NAME.KUBE_UPGRADE == data['name']:\n strategy_obj = object.__new__(KubeUpgradeStrategy)\n else:\n strategy_obj = object.__new__(strategy.StrategyStage)\n\n strategy_obj.from_dict(data, build_phase, apply_phase, abort_phase)\n return strategy_obj", "def resolve_by_name(func, name, *args):\n if uuidutils.is_uuid_like(name):\n return name\n\n results = func(criterion={\"name\": \"%s\" % name}, *args)\n length = len(results)\n\n if length == 1:\n return results[0][\"id\"]\n elif length == 0:\n raise exceptions.NotFound(\"Name %s didn't resolve\" % name)\n else:\n msg = \"Multiple matches found for %s, please use ID instead.\" % name\n raise exceptions.NoUniqueMatch(msg)", "def set_query_strategy(self, strategy=\"QueryInstanceUncertainty\", **kwargs):\n # check \n if self._existed_query_strategy:\n raise Exception(\"You already has set the query strategy,don`t has to set it again.\")\n # user-defined strategy\n if callable(strategy):\n self.__custom_strategy_flag = True\n strategyname = kwargs.pop('strategyname', None)\n if strategyname is not None:\n self._query_function_name = strategyname\n else:\n self._query_function_name = 'user-defined strategy'\n self.__custom_func_arg = kwargs\n self._query_function = strategy(self._X, self._y, **kwargs)\n else:\n # a pre-defined strategy in ALiPy\n if strategy not in ['QueryInstanceQBC', 'QueryInstanceUncertainty', 'QueryRandom', 'QueryInstanceRandom',\n 'QueryInstanceGraphDensity', 'QueryInstanceQUIRE',\n 'QueryInstanceBMDR', 'QueryInstanceSPAL', 'QueryInstanceLAL',\n 'QueryExpectedErrorReduction']:\n raise NotImplementedError('Strategy {} is not implemented. 
Specify a valid '\n 'method name or privide a callable object.'.format(str(strategy)))\n else:\n self._query_function_name = strategy\n if strategy == 'QueryInstanceQBC':\n method = kwargs.pop('method', 'query_by_bagging')\n disagreement = kwargs.pop('disagreement', 'vote_entropy')\n self._query_function = QueryInstanceQBC(self._X, self._y, method, disagreement)\n elif strategy == 'QueryInstanceUncertainty':\n measure = kwargs.pop('measure', 'entropy')\n self._query_function = QueryInstanceUncertainty(self._X, self._y, measure)\n elif strategy == 'QueryInstanceRandom' or strategy == 'QueryRandom':\n self._query_function = QueryInstanceRandom(self._X, self._y)\n elif strategy == 'QueryExpectedErrorReduction':\n self._query_function = QueryExpectedErrorReduction(self._X, self._y)\n elif strategy == 'QueryInstanceGraphDensity' or strategy == 'QueryInstanceQUIRE':\n if self._train_idx is None:\n raise ValueError(\n 'train_idx is None.Please split data firstly.You can call set_data_split or split_AL to split data.')\n self._query_function_need_train_ind = True\n self._query_function_metric = kwargs.pop('metric', 'manhattan')\n self._query_function_kwargs = kwargs\n elif strategy == 'QueryInstanceBMDR':\n beta = kwargs.pop('beta', 1000)\n gamma = kwargs.pop('gamma', 0.1)\n rho = kwargs.pop('rho', 1)\n self._query_function = QueryInstanceBMDR(self._X, self._y, beta, gamma, rho, **kwargs)\n self.qp_solver = kwargs.pop('qp_sover', 'ECOS')\n elif strategy == 'QueryInstanceSPAL':\n mu = kwargs.pop('mu', 0.1)\n gamma = kwargs.pop('gamma', 0.1)\n rho = kwargs.pop('rho', 1)\n lambda_init = kwargs.pop('lambda_init', 0.1)\n lambda_pace = kwargs.pop('lambda_pace', 0.01)\n self._query_function = QueryInstanceSPAL(self._X, self._y, mu, gamma, rho, lambda_init, lambda_pace,\n **kwargs)\n self.qp_solver = kwargs.pop('qp_sover', 'ECOS')\n elif strategy == 'QueryInstanceLAL':\n mode = kwargs.pop('mode', 'LAL_iterative')\n data_path = kwargs.pop('data_path', '.')\n cls_est = kwargs.pop('cls_est', 50)\n train_slt = kwargs.pop('train_slt', True)\n self._query_function = QueryInstanceLAL(self._X, self._y, mode, data_path, cls_est, train_slt,\n **kwargs)", "def secondaryStrategy(self, strategy):\n assert isinstance(strategy, SimpleStrategy)\n self.__secondaryStrategy = strategy", "def use_strategy(new_strategy):\n warnings.warn(\n \"use_strategy() is deprecated and will be removed in the future.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def wrapped_class(klass):\n klass._meta.strategy = new_strategy\n return klass\n return wrapped_class", "def _resolve_arg(action, choices, param, required, typ):\n name, _param = param\n _required = None\n del param\n if _param[\"typ\"] in simple_types:\n typ = _param[\"typ\"]\n # elif (\n # isinstance(_param[\"typ\"], str)\n # and _param[\"typ\"].startswith(\"<class '\")\n # and _param[\"typ\"].endswith(\"'>\")\n # ):\n # typ = _param[\"typ\"][8:-2]\n elif _param[\"typ\"] == \"dict\" or name.endswith(\"kwargs\"):\n typ, required = \"loads\", not name.endswith(\"kwargs\")\n elif _param[\"typ\"]:\n from doctrans.emitter_utils import ast_parse_fix\n\n parsed_type = ast_parse_fix(_param[\"typ\"])\n for node in walk(parsed_type):\n if isinstance(node, Tuple):\n maybe_choices = tuple(\n get_value(elt)\n for elt in node.elts\n if isinstance(elt, (Constant, Str))\n )\n if len(maybe_choices) == len(node.elts):\n choices = maybe_choices\n elif isinstance(node, Name):\n if node.id == \"Optional\":\n _required = False\n elif node.id in simple_types:\n typ = node.id\n elif node.id not in 
frozenset((\"Union\",)):\n typ = FALLBACK_TYP\n\n if node.id == \"List\":\n action = \"append\"\n if _required is None and (typ or \"\").lower() in frozenset(\n (\"str\", \"complex\", \"int\", \"float\", \"anystr\", \"list\", \"tuple\", \"dict\")\n ):\n _required = True\n\n # if isinstance(_param.get(\"default\"), (list, tuple)):\n # if len()\n # typ, action = None, \"append\"\n\n # if isinstance(param.get(\"default\"), (Constant, Str, Num)):\n # param[\"default\"] = get_value(param[\"default\"])\n return (\n action,\n choices,\n required if _required is None else _required,\n typ,\n (name, _param),\n )", "def role_map(field_name):\n\n field_name = field_name.lower()\n\n if field_name=='label' or field_name=='score':\n return field_name\n elif field_name=='id':\n return 'non-predictor'\n else:\n return 'predictor'", "def get_strategy(cls, name, ns=None):\n ns = ns or cls.__strategy_ns__\n if ns is None:\n raise RuntimeError(\n _('No namespace provided and __strategy_ns__ unset'))\n\n LOG.debug('Looking for strategy %s in %s', name, ns)\n\n return utils.import_class(ns + \".\" + name)", "def _get_translation_factory_and_field(self):\n raise NotImplementedError()", "def put_strategy_event(self, strategy: SpreadStrategyTemplate):\n pass", "def _computeName(self, solver):\n template = Template(\"$type $implem, $params -- over $problem\")\n solver['_generatedName'] = True\n return template.substitute(\n type=solver['type'], implem=solver['implementation'],\n problem=solver['problem']['name'],\n params=', '.join(\n '='.join(p) for p in solver['parameters'].iteritems()))", "def resolve_alias(name: str) -> str:\n ...", "def resolve_method(self, name):\n\n\t\tif \".\" in name:\n\t\t\tifname, name = name.rsplit(\".\", 1)\n\t\telse:\n\t\t\tifname = None\n\n\t\tfor iface in self.interfaces:\n\t\t\tif iface.name == ifname or ifname is None:\n\t\t\t\tfor method in iface.methods:\n\t\t\t\t\tif method.name == name:\n\t\t\t\t\t\treturn iface, method\n\t\telse:\n\t\t\treturn None, None", "def resolve_path_name_for_parameter(song, param):\n return resolve_name_for_path(song, resolve_path_for_parameter(param), param)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a ZClass for 'base_class' in 'pack' (before a ProductContext is available). 'pack' may be either the module which is to contain the ZClass or its 'globals()'. If 'nice_name' is passed, use it as the name for the created class, and create the "ugly" '_ZClass_for_...' name as an alias; otherwise, just use the "ugly" name. Register the ZClass under its meta_type in the Products registries.
def createZClassForBase( base_class, pack, nice_name=None, meta_type=None ): d = {} zname = '_ZClass_for_' + base_class.__name__ if nice_name is None: nice_name = zname exec 'class %s: pass' % nice_name in d Z = d[nice_name] Z.propertysheets = OFS.PropertySheets.PropertySheets() Z._zclass_ = base_class Z.manage_options = () try: Z.__module__ = pack.__name__ setattr( pack, nice_name, Z ) setattr( pack, zname, Z ) except AttributeError: # we might be passed 'globals()' Z.__module__ = pack[ '__name__' ] pack[ nice_name ] = Z pack[ zname ] = Z if meta_type is None: if hasattr(base_class, 'meta_type'): meta_type=base_class.meta_type else: meta_type=base_class.__name__ base_module = base_class.__module__ base_name = base_class.__name__ key = "%s/%s" % (base_module, base_name) if base_module[:9] == 'Products.': base_module = base_module.split('.' )[1] else: base_module = base_module.split('.' )[0] info="%s: %s" % ( base_module, base_name ) Products.meta_class_info[key] = info # meta_type Products.meta_classes[key] = Z return Z
[ "def manage_addZClass(self, id, title='', baseclasses=[],\n meta_type='', CreateAFactory=0, REQUEST=None,\n zope_object=0):\n if bad_id(id) is not None:\n raise 'Bad Request', (\n 'The id %s is invalid as a class name.' % id)\n if not meta_type: meta_type=id\n\n r={}\n for data in self.aq_acquire('_getProductRegistryData')('zclasses'):\n r['%(product)s/%(id)s' % data]=data['meta_class']\n\n bases=[]\n for b in baseclasses:\n if Products.meta_classes.has_key(b):\n bases.append(Products.meta_classes[b])\n elif r.has_key(b):\n bases.append(r[b])\n else:\n raise 'Invalid class', b\n\n Z=ZClass(id, title, bases, zope_object=zope_object)\n Z._zclass_.meta_type=meta_type\n self._setObject(id, Z)\n\n if CreateAFactory and meta_type:\n self.manage_addDTMLMethod(\n id+'_addForm',\n id+' constructor input form',\n addFormDefault % {'id': id, 'meta_type': meta_type},\n )\n constScript = PythonScript(id+'_add')\n constScript.write(addDefault % {'id': id, 'title':id+' constructor'})\n self._setObject(constScript.getId(), constScript)\n self.manage_addPermission(\n id+'_add_permission',\n id+' constructor permission',\n 'Add %ss' % meta_type\n )\n self.manage_addPrincipiaFactory(\n id+'_factory',\n id+' factory',\n meta_type,\n id+'_addForm',\n 'Add %ss' % meta_type\n )\n\n Z=self._getOb(id)\n Z.propertysheets.permissions.manage_edit(\n selected=['Add %ss' % id])\n Z.manage_setPermissionMapping(\n permission_names=['Create class instances'],\n class_permissions=['Add %ss' % meta_type]\n )\n if REQUEST is not None:\n return self.manage_main(self,REQUEST, update_menu=1)", "def dynamic_class_creation(name, base=object):\n # Protected name in the schema\n if name in [\n \"__schema^2__\",\n ]:\n return None\n schema_entry = aapi_schema[\"AAPI_schema\"][name]\n helper_string = _construct_docstring(schema_entry)\n atype, ptype, delimiter = _determine_type(schema_entry)\n status = schema_entry.get(\"status\", \"production\")\n\n new_class = type(\n name,\n (base,),\n dict(\n __doc__=helper_string,\n name=name,\n atype=atype,\n ptype=ptype,\n delimiter=delimiter,\n status=status,\n ),\n )\n return new_class", "def __build_class__(func, name, *bases, metaclass=None, **kwds): # real signature unknown; restored from __doc__\n pass", "def newclassname(self, bases: Tuple[Type, ...]) -> str:\n namegetter = attrgetter(\"__name__\")\n names = list(map(namegetter, bases))\n # names.reverse()\n return \"+\".join(names)", "def friendly_class(*names_for_hacks_up):\n if names_for_hacks_up and len(names_for_hacks_up) == 1:\n arg = names_for_hacks_up[0]\n if not isinstance(arg, str):\n # Decorator applied without arguments\n # Use class name, apply to class\n try:\n name = arg.__qualname__\n except AttributeError:\n raise RuntimeError('@hacks.friendly_class: ' +\n 'cannot get class name for ' + str(arg))\n return friendly_class(name)(arg)\n\n def friendly_class_decorator(original_cls):\n cache = {}\n\n def reclassify(_):\n return _cached_effective_wrapped_up_class(cache, original_cls,\n names_for_hacks_up)\n\n class MetaAutoreparenting(type):\n def __call__(cls, *args, **kwds):\n original_object = type.__call__(cls, *args, **kwds)\n return mutants.ClassHopperMutant(original_object, reclassify)\n\n class AutoReparenting(original_cls, metaclass=MetaAutoreparenting):\n pass\n\n return AutoReparenting\n\n return friendly_class_decorator", "def with_metaclass(meta, *bases):\r\n return meta('NewBase', bases, {})", "def scoped(base_class, base_dn):\n class Meta:\n proxy = True\n import re\n suffix = re.sub('[=,]', '_', base_dn)\n 
name = \"%s_%s\" % (base_class.__name__, str(suffix))\n new_class = type(name, (base_class,), {'base_dn': base_dn, '__module__': base_class.__module__, 'Meta': Meta})\n return new_class", "def _override(self, name, obj):\n path = name.split('.')\n assert len(path) > 1, 'module name not provided'\n obj_name = path[-1]\n\n objs = self._resolvePath(path[:-1])\n container = objs[-1]\n try:\n original_class = getattr(container, obj_name, None)\n setattr(container, obj_name, obj)\n self._original.append((container, obj_name, original_class))\n except TypeError:\n # We have a static class; we will have to modify its container.\n # This works for global functions in gtk too because their\n # container is an ordinary python module (fake_gtk).\n name = container.__name__\n prev_container = objs[-2]\n subclass = type(name, (container, ), {obj_name: obj})\n setattr(prev_container, name, subclass)\n self._original.append((prev_container, name, container))", "def newClass(self, name = None):\n logger.debug(\"Creating new class: %s\" % (name))\n symCls = SymbolClass(self.featureSet, name = name)\n self.symbolClasses[symCls.name] = symCls\n\n #self.count += 1;\n #self.features.append([])\n #if name == None:\n #name = str(self.count)\n #self.names.append(name)\n return symCls.name", "def factory(cls, sitename: str):\n return cls.subclasses[sitename]", "def getPluginClass(self, variety, name):\n\t\treturn self.getPluginItem(variety, name, 0)", "def subclass(klass, *args, **kws):\n kws['baseclass'] = klass\n if isinstance(klass, Template):\n templateAPIClass = klass\n else:\n templateAPIClass = Template\n return templateAPIClass.compile(*args, **kws)", "def create_class(self, superclass_name=None):\n java_class = ClassDeclaration('Class_' + str(self.counter), [])\n if superclass_name:\n java_class.extends = Type(Name(superclass_name))\n self.counter += 1\n return java_class", "def _create_wrapper(cls_spec, element_info, myself):\n # only use the meta class to find the wrapper for BaseWrapper\n # so allow users to force the wrapper if they want\n if cls_spec != myself:\n obj = object.__new__(cls_spec)\n obj.__init__(element_info)\n return obj\n\n new_class = cls_spec.find_wrapper(element_info)\n obj = object.__new__(new_class)\n\n obj.__init__(element_info)\n\n return obj", "def base(cls: T) -> T:\n base.classes.add(cls)\n return cls", "def get_obj_class(klass, _quanta, _logger):\n class QuantaObj(klass):\n quanta = _quanta\n logger = _logger\n\n return QuantaObj", "def create_new_subclass(cls, name, **kwargs):\n kwargs = kwargs.copy()\n kwargs['name'] = name\n name = name.replace(' ', '')\n return CustomMeta(name, (CustomViewer,), kwargs)", "def create_item(name, class_name, parent_ctx=get_current_context()):\n if class_name == \"Context\":\n return create_context(name, parent_ctx)\n parent_ctx = get_item(parent_ctx)\n item = ix.create_object(name, class_name, parent_ctx.get_ix_node())\n if item:\n return get_item(item)\n else:\n return None", "def register_subclass(cls, typ, supertyp):\n if supertyp not in cls.TYPE_CODES:\n raise ValueError(\"Superclass not registered: %r\" % (supertyp,))\n\n typecode = cls.TYPE_CODES[supertyp]\n cls.TYPE_CODES[typ] = typecode\n PROXY_TYPES[typ] = PROXY_TYPES[supertyp]\n return cls.OBJ_PACKERS[typecode][2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a Z Class
def manage_addZClass(self, id, title='', baseclasses=[], meta_type='', CreateAFactory=0, REQUEST=None, zope_object=0): if bad_id(id) is not None: raise 'Bad Request', ( 'The id %s is invalid as a class name.' % id) if not meta_type: meta_type=id r={} for data in self.aq_acquire('_getProductRegistryData')('zclasses'): r['%(product)s/%(id)s' % data]=data['meta_class'] bases=[] for b in baseclasses: if Products.meta_classes.has_key(b): bases.append(Products.meta_classes[b]) elif r.has_key(b): bases.append(r[b]) else: raise 'Invalid class', b Z=ZClass(id, title, bases, zope_object=zope_object) Z._zclass_.meta_type=meta_type self._setObject(id, Z) if CreateAFactory and meta_type: self.manage_addDTMLMethod( id+'_addForm', id+' constructor input form', addFormDefault % {'id': id, 'meta_type': meta_type}, ) constScript = PythonScript(id+'_add') constScript.write(addDefault % {'id': id, 'title':id+' constructor'}) self._setObject(constScript.getId(), constScript) self.manage_addPermission( id+'_add_permission', id+' constructor permission', 'Add %ss' % meta_type ) self.manage_addPrincipiaFactory( id+'_factory', id+' factory', meta_type, id+'_addForm', 'Add %ss' % meta_type ) Z=self._getOb(id) Z.propertysheets.permissions.manage_edit( selected=['Add %ss' % id]) Z.manage_setPermissionMapping( permission_names=['Create class instances'], class_permissions=['Add %ss' % meta_type] ) if REQUEST is not None: return self.manage_main(self,REQUEST, update_menu=1)
[ "def add_class(self, class_):\n self.classes.append(class_)", "def AddZLayer(self, *args):\n return _Graphic3d.Graphic3d_StructureManager_AddZLayer(self, *args)", "def add_object_class(self, parsername, kind) :\n self.object_classes[parsername] = kind", "def newClass(self, name = None):\n logger.debug(\"Creating new class: %s\" % (name))\n symCls = SymbolClass(self.featureSet, name = name)\n self.symbolClasses[symCls.name] = symCls\n\n #self.count += 1;\n #self.features.append([])\n #if name == None:\n #name = str(self.count)\n #self.names.append(name)\n return symCls.name", "def test_add_class(self):\n test_node = package_dependency.JavaPackage(self.TEST_PKG_1)\n mock_class_node = create_mock_java_class()\n test_node.add_class(mock_class_node)\n self.assertEqual(test_node.classes,\n {mock_class_node.name: mock_class_node})", "def add_class(self, cid, code=None, selected=False):\n temp_class = self.Class(self, cid, code=code, selected=selected)\n for idx, kelas in enumerate(self._classes):\n if kelas.id == temp_class.id:\n self._classes[idx] = temp_class\n return\n self._classes.append(temp_class)", "def createZClassForBase( base_class, pack, nice_name=None, meta_type=None ):\n d = {}\n zname = '_ZClass_for_' + base_class.__name__\n\n if nice_name is None:\n nice_name = zname\n\n exec 'class %s: pass' % nice_name in d\n\n Z = d[nice_name]\n Z.propertysheets = OFS.PropertySheets.PropertySheets()\n Z._zclass_ = base_class\n Z.manage_options = ()\n\n try:\n Z.__module__ = pack.__name__\n setattr( pack, nice_name, Z )\n setattr( pack, zname, Z )\n except AttributeError: # we might be passed 'globals()'\n Z.__module__ = pack[ '__name__' ]\n pack[ nice_name ] = Z\n pack[ zname ] = Z\n\n if meta_type is None:\n if hasattr(base_class, 'meta_type'): meta_type=base_class.meta_type\n else: meta_type=base_class.__name__\n\n base_module = base_class.__module__\n base_name = base_class.__name__\n\n key = \"%s/%s\" % (base_module, base_name)\n\n if base_module[:9] == 'Products.':\n base_module = base_module.split('.' )[1]\n else:\n base_module = base_module.split('.' 
)[0]\n\n info=\"%s: %s\" % ( base_module, base_name )\n\n Products.meta_class_info[key] = info # meta_type\n Products.meta_classes[key] = Z\n\n return Z", "def register_class_type(typ):\n _CLASS_TYPES.add(typ)", "def test_add_class(self):\n body = ModelClass()\n response = self.client.open(\n '/pablokvitca/classdeck-api/1.0.0/class/filtered',\n method='GET',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def add(obj, namespace):", "def test_add_class_duplicate(self):\n test_node = package_dependency.JavaPackage(self.TEST_PKG_1)\n mock_class_node = create_mock_java_class()\n test_node.add_class(mock_class_node)\n test_node.add_class(mock_class_node)\n self.assertEqual(test_node.classes,\n {mock_class_node.name: mock_class_node})", "def load_inference_classes(self):\n\n self.add_class(\"private\", 1, \"private\")\n # self.add_class(\"Source\", 1, \"name\")\n # self.add_class(\"Source\", 2, \"name\")\n # self.add_class(\"Source\", 3, \"name\")\n # etc...\n return", "def addClass(self, cls):\n if not hasattr(cls, '_matches_data'):\n raise AttributeError(\"Class '{}' has no '_matches_data' method\".format(cls.__name__))\n self._registry.add(cls)", "def create_class(self, superclass_name=None):\n java_class = ClassDeclaration('Class_' + str(self.counter), [])\n if superclass_name:\n java_class.extends = Type(Name(superclass_name))\n self.counter += 1\n return java_class", "def add_layer(self, bottom, layer_type, counter, **kwargs):\n suffix = \"\" if self.counters.get(counter) == None else str(self.counters.get(counter, 1))\n name = counter + suffix\n layer = None\n if self.__is_sequence(bottom):\n layer = layers.__getattr__(layer_type)(*bottom, **kwargs)\n else:\n layer = layers.__getattr__(layer_type)(bottom, **kwargs)\n self.n.__setattr__(name, layer)\n self.counters[counter] = self.counters.get(counter, 1) + 1\n return layer", "def addZA( self, ZA, suffix = \"\" ) :\n\n sZA = endlmisc.strZASuffix( ZA, suffix )\n for z in self.zas :\n if ( z.sZA == sZA ) : raise Exception( \"\\nError in endlProject.addZA: za = %s already present in this endlProject\" % sZA )\n z = endlZA.endlZA( ZA, self.yi, database = None, workDir = self.workDir, suffix = suffix, readOnly = self.readOnly, bdflsFile = self.bdflsFile )\n self.zas.append( z )\n return z", "def _register_classes(classes, addon_name_for_counter=None):\n\n from bpy.utils import register_class\n\n class_count = 0\n for cls in classes:\n register_class(cls)\n class_count += 1\n if addon_name_for_counter:\n print(f\"{addon_name_for_counter}: Registered {str(class_count)} classes\")", "def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname", "def _add_hyperbox(self, xl, xu, cls):\n # add column to V\n dV = np.zeros((self.n, self.m + 1))\n dV[:, :-1] = self.V\n if xl is not None:\n dV[:, -1] = xl\n self.V = dV\n # add column to W\n dW = np.zeros((self.n, self.m + 1))\n dW[:, :-1] = self.W\n if xu is not None:\n dW[:, -1] = xu\n self.W = dW\n # set class of new hyperbox\n # TODO: add clustering support, where if d==0, B_cls[-1] = p+1\n self.B_cls = np.append(self.B_cls, cls)\n # increment number-of-hyperboxes counter\n self.m += 1\n # return classification\n return cls" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create Z instance. If called with a RESPONSE, the RESPONSE will be redirected to the management screen of the new instance's parent Folder. Otherwise, the instance will be returned.
def createInObjectManager(self, id, REQUEST, RESPONSE=None): i=mapply(self._zclass_, (), REQUEST) try: i._setId(id) except AttributeError: i.id=id folder=durl=None if hasattr(self, 'Destination'): d=self.Destination if d.im_self.__class__ is FactoryDispatcher: folder=d() if folder is None: folder=self.aq_parent if not hasattr(folder,'_setObject'): folder=folder.aq_parent folder._setObject(id, i) if RESPONSE is not None: try: durl=self.DestinationURL() except: durl=REQUEST['URL3'] RESPONSE.redirect(durl+'/manage_workspace') else: # An object is not guarenteed to have the id we passed in. id = i.getId() return folder._getOb(id)
[ "def create_project():\n data = {'name': 'new project', 'id': 123}\n headers = {'location': 'http://chen.rotemlevy.name/project/123'}\n return Response(data, status=201, headers=headers)", "def create(self) -> None:\n url = f\"{self.base_url()}/loop/create/{self.name}?templateName={self.template}\"\n instance_details = self.send_message_json('POST',\n 'Create Loop Instance',\n url)\n self.details = instance_details", "def open_create(self):\n base.Button(self._driver, self._locators.CREATE_BTN_CSS).click()\n return self.create_obj_cls(self._driver)", "def create(cls, **kwargs):", "def create(self, request):\n status = request.POST.get('status', False)\n if status and status.lower() == 'false':\n status = False\n if status:\n post_data = QueryDict(request.read())\n response = process_async_api_manifestation(\n post_data, request.user)\n if 'success' in response:\n response.pop('success')\n return response\n else:\n try:\n return create_packages_via_api(request)\n except Exception as e:\n return {\n 'rmk': 'Package creation API error. Package might be '\n 'saved. Please contact tech.admin@delhivery.com.'\n ' Error message is {0}. Quote this error message'\n ' while reporting.'.format(e.message),\n 'success': False\n }", "def test_backup_create_instance(self):\n result = instance_info.dbaas.backups.create(BACKUP_NAME,\n instance_info.id,\n BACKUP_DESC)\n assert_equal(BACKUP_NAME, result.name)\n assert_equal(BACKUP_DESC, result.description)\n assert_equal(instance_info.id, result.instance_id)\n assert_equal('NEW', result.status)\n instance = instance_info.dbaas.instances.list()[0]\n assert_equal('BACKUP', instance.status)\n global backup_info\n backup_info = result", "def _process_create_response(self, request, response):\n return self.to_resource(response[self.container])", "def new_zone(options, *args, **kwargs):\n if not options.options:\n print(bcolors.FAIL + \"The `new` command requires a `name` argument: the zone name.\" + bcolors.ENDC)\n sys.exit(1)\n\n name = options.options[0]\n\n print(\"Creating new zone \\\"{bcolors.BOLD}{name}{bcolors.ENDC}\\\"... 
\".format(bcolors=bcolors, name=name), end='')\n resp = api_call('/zones', method='POST', payload={'name': name})\n if resp.status_code == 200:\n print(bcolors.OKGREEN + \"done\" + bcolors.ENDC + ', uuid=' + resp.json()['uuid'])\n else:\n print(bcolors.FAIL + \"error\" + bcolors.ENDC)\n print(resp.json())", "def create(self):\n self._assert_c8y()\n # 1_ create the group\n group_json = self._to_json(is_root=True)\n response_json = self.c8y.post('/inventory/managedObjects', group_json)\n group_id = response_json['id']\n # 2_ create child groups recursively\n if self._added_child_groups:\n self._create_child_groups(parent_id=group_id, parent=self, groups=self._added_child_groups)\n # 3_ parse/return result\n if self._added_child_groups:\n # if there were child assets we need to read the object again\n response_json = self.c8y.get('/inventory/managedObjects/' + group_id)\n result = self.from_json(response_json)\n result.c8y = self.c8y\n return result", "def create(self):\n evaluate_request(\n self.__v1_api.create_namespaced_service(\n namespace=self.__namespace, body=self.zk_service, async_req=True\n ),\n allowed_statuses=[409],\n )\n evaluate_request(\n self.__v1_api.create_namespaced_config_map(\n namespace=self.__namespace, body=self.zk_configmap, async_req=True\n ),\n allowed_statuses=[409],\n )\n evaluate_request(\n self.__v1_policy_api.create_namespaced_pod_disruption_budget(\n namespace=self.__namespace, body=self.zk_pdb, async_req=True\n ),\n allowed_statuses=[409],\n )\n evaluate_request(\n self.__v1_apps_api.create_namespaced_stateful_set(\n namespace=self.__namespace, body=self.zk, async_req=True\n ),\n allowed_statuses=[409],\n )\n\n zk_started = False\n while not zk_started:\n try:\n stream(\n self.__v1_api.connect_get_namespaced_pod_exec,\n \"zk-0\",\n self.__namespace,\n command=[\"zkOk.sh\"],\n stderr=False,\n stdin=False,\n stdout=True,\n tty=False,\n )\n zk_started = True\n except Exception as _:\n sleep(2)", "def do_create(self, arg):\n \"\"\"\n method that creates a new instance of a class, saves it\n (to the JSON file) and prints the id. 
Ex: $ create BaseModel\n \"\"\"\n if not arg:\n print(\"** class name missing **\")\n elif arg not in HBNBCommand.className.keys():\n print(\"** class doesn't exist **\")\n else:\n obj = HBNBCommand.className[arg]()\n HBNBCommand.className[arg].save(obj)\n print(obj.id)", "def new(self, uri, constructor_args):\n\n self.log.info(\"Creating new LiveActivityGroup with arguments: %s\" % constructor_args)\n route = Path().get_route_for('LiveActivityGroup', 'new')\n url = \"%s%s\" % (uri, route)\n request_response = self._api_post_json(url, constructor_args)\n if request_response.url:\n self.absolute_url = request_response.url.replace(\"view.html\", \"view.json\")\n self.fetch()\n self.log.info(\"Created new LiveActivityGroup with url=%s, data_hash is now %s\" % (self.absolute_url, self.data_hash))\n return self\n else:\n self.log.info(\"Created new LiveActivityGroup %s but returned False\" % self)\n return False", "def create(self, request, *args, **kwargs):\n data = self.request.DATA\n packaged = 'upload' in data\n form = (NewPackagedForm(data) if packaged\n else NewManifestForm(data))\n\n if not form.is_valid():\n return Response(form.errors, status=HTTP_400_BAD_REQUEST)\n\n if not packaged:\n upload = FileUpload.objects.create(\n user=getattr(request, 'amo_user', None))\n # The hosted app validator is pretty fast.\n tasks.fetch_manifest(form.cleaned_data['manifest'], upload.pk)\n else:\n upload = form.file_upload\n # The packaged app validator is much heavier.\n tasks.validator.delay(upload.pk)\n\n log.info('Validation created: %s' % upload.pk)\n self.kwargs = {'pk': upload.pk}\n # Re-fetch the object, fetch_manifest() might have altered it.\n upload = self.get_object()\n serializer = self.get_serializer(upload)\n status = HTTP_201_CREATED if upload.processed else HTTP_202_ACCEPTED\n return Response(serializer.data, status=status)", "def _create_archive_obj(self):\n ArtifactoryPath = self.cls\n folder = ArtifactoryPath(\"http://b/artifactory/reponame/folder\")\n constructed_url = \"http://b/artifactory/api/storage/reponame/folder\"\n responses.add(\n responses.GET,\n constructed_url,\n status=200,\n json=self.dir_stat,\n )\n archive_obj = folder.archive(check_sum=True)\n return archive_obj", "async def create_vpatch(self, instance=None, domain='example.com', action_name='.env'):\n\n url = f'https://{self.__api}/v1/objects/hint/create'\n body = {\"type\": \"vpatch\", \"action\": [{\"point\": [\"action_name\"], \"type\": \"iequal\", \"value\": action_name},\n {\"point\": [\"action_ext\"], \"type\": \"absent\", \"value\": \"\"},\n {\"point\": [\"header\", \"HOST\"], \"type\": \"iequal\",\n \"value\": domain}],\n \"clientid\": self.clientid, \"validated\": True, \"point\": [[\"action_name\"]], \"attack_type\": \"any\"}\n if instance:\n body['action'].append({\"point\": [\"instance\"], \"type\": \"equal\", \"value\": instance})\n\n async with aiohttp.ClientSession() as session:\n response = await self.fetch(session, url, body=body)\n logger.debug(f'The function create_vpatch has been successful by filter {body}'\n f'It has created a virtual patch')\n logger.info(f'The virtual patch has been created')\n return response", "def create(self):\n if self.exists() is not False:\n self.load()\n return\n\n userid = self.insertUser()\n\n existingTransaction = self.beginTransaction()\n action = self.daofactory(classname=\"Workflow.New\")\n action.execute(spec=self.spec, owner=userid, name=self.name,\n task=self.task, wfType=self.wfType,\n alt_fs_close=self.alternativeFilesetClose,\n 
priority=self.priority,\n conn=self.getDBConn(),\n transaction=self.existingTransaction())\n\n self.id = self.exists()\n self.commitTransaction(existingTransaction)\n logging.info(\"Workflow id %d created for %s\", self.id, self.name)\n return", "def create(cls, argv):\n request = cls(argv, dict());\n return request;", "def create(self):\n ret = self._get_attr(\"create\")\n return ret", "def createVM(request, VMname, imageName, flavorName):\n api.createVM(VMname, imageName, flavorName)\n return HttpResponseRedirect('/project_space/manage')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Edit an existing Workspace.
def edit(self, name=UNSPECIFIED, extraParams={}): import labstep.entities.workspace.repository as workspaceRepository return workspaceRepository.editWorkspace(self, name, extraParams=extraParams)
[ "def workspace(string, projectPath=\"string\", updateAll=bool, fileRuleList=bool, fileRuleEntry=\"string\", renderTypeEntry=\"string\", renderType=\"string\", active=bool, expandName=\"string\", objectType=\"string\", saveWorkspace=bool, shortName=bool, objectTypeList=bool, fileRule=\"string\", filter=bool, newWorkspace=bool, listFullWorkspaces=bool, listWorkspaces=bool, fullName=bool, objectTypeEntry=\"string\", variableEntry=\"string\", rootDirectory=bool, update=bool, list=bool, renderTypeList=bool, variableList=bool, removeVariableEntry=\"string\", create=\"string\", baseWorkspace=\"string\", directory=\"string\", variable=\"string\", removeFileRuleEntry=\"string\", openWorkspace=bool):\n pass", "def edit(sheet):\n open_with_editor(path(sheet))", "def update_workspace(self, workspace_id, workspace_obj):\n _op = fresh_operation('update_workspace')\n _op['method'] = 'PUT'\n _op['path'] = '/workspaces/' + str(workspace_id)\n _op['json'] = workspace_obj\n\n expected = ['Result', 'Workspace']\n\n prepped_request = self._base.prepare_request(_op)\n response = self._base.request(prepped_request, expected, _op)\n\n return response", "def editProjectInfo(self, **data):\n check, content = editProject(self.token, self.projectId, **data)\n if check:\n return None\n return content", "def edit_isp(isp_id):\n isp = db_session.query(ISP).filter_by(id=isp_id).one()\n\n if request.method == \"POST\":\n if request.form[\"choice\"] == \"edit\":\n isp.name = request.form[\"name\"]\n db_session.add(isp)\n db_session.commit()\n flash(\"ISP Successfully Edited.\")\n return redirect(url_for(\"show_isps\"))\n else:\n return render_template(\"edit_isp.html\", isp=isp, title=\"Edit ISP\")", "def edit_project(nd, project_no, project_id):\n item = db_helper.get_specific_project(project_id)\n if item is None:\n flash(\"Requested project does not exist!\")\n return redirect(url_for(\"index\"))\n if not g.user or g.user._id != item.author:\n flash(\"You can not edit other's project!\")\n return redirect(url_for(\"project_item\", nd=nd, project_no=project_no, project_id=project_id))\n if request.method == \"POST\":\n url_edited = db_helper.update_project(project_id, request.form)\n flash(\"Your project's been successfully edited!\")\n if url_edited:\n return redirect(url_for(\"loading_page\"))\n else:\n return redirect(url_for(\"project_item\", nd=nd, project_no=project_no, project_id=project_id))\n else:\n return render_template(\"edit_project.html\", item=item)", "def _put_workspace(key, workspace):\n _WORKSPACES[key] = workspace", "def edit(self) -> 'TymeSheet':\n with NamedTemporaryFile('w+') as tf:\n tf.write(str(self))\n tf.flush()\n editor = os.environ.get('EDITOR', 'vi')\n subprocess.call('{editor} {tf.name}'.format_map(locals()), shell=True)\n return TymeSheet.from_file(tf.name)", "def on_edit_clicked(self, widget):\n sel = self.treeview_sources.get_selection()\n (model, iter) = sel.get_selected()\n if not iter:\n return\n old_source_entry = model.get_value(iter, LIST_ENTRY_OBJ)\n dialog = DialogEdit(self.window_main, self.sourceslist,\n old_source_entry, self.datadir)\n if dialog.run() == Gtk.ResponseType.OK:\n try:\n self.backend.ReplaceSourceEntry(str(old_source_entry),\n str(dialog.new_source_entry))\n except dbus.DBusException as e:\n if e._dbus_error_name == 'com.ubuntu.SoftwareProperties.PermissionDeniedByPolicy':\n logging.error(\"Authentication canceled, changes have not been saved\")", "def suite_edit(suite, datasource, directory, jupyter, batch_kwargs):\n _suite_edit(\n suite,\n datasource,\n 
directory,\n jupyter,\n batch_kwargs,\n usage_event=\"cli.suite.edit\",\n )", "def getActiveWorkspace(self) -> ghidra.framework.model.Workspace:\n ...", "def setWorkspace(self, workspaceName):\n if not self.contextHelper.isAccessibleWorkspaceName(workspaceName):\n raise Exception('Specified workspace not valid for your credentials')\n self.contextHelper.setWorkspace(workspaceName)", "def workspace(make_workspace):\n workspace = make_workspace()\n return workspace", "def update_playground():\n from flask import request\n\n if request.method != 'POST':\n abort(401)\n\n playground = Playground.get(id=request.form.get('id'))\n\n payload = create_change_payload('update', request) \n payload['playground']['id'] = int(request.form.get('id'))\n\n write_data(payload)\n\n return redirect('%s/playground/%s.html?action=editing_thanks' % (app_config.S3_BASE_URL, playground.slug))", "def buttonEditTeam_clicked(self):\n team_to_edit = copy.deepcopy(self.get_current_selected_team())\n current_index = self.listTeam.currentRow()\n if team_to_edit:\n edit_team_window = TeamEditorWindow(self._db, team_to_edit)\n # updates ui prior to opening\n edit_team_window.update_ui()\n if edit_team_window.exec_() == QDialog.DialogCode.Accepted:\n # Remove instance of team, add team back with member info, update ui\n self.league.teams.remove(team_to_edit)\n self.league.teams.insert(current_index, edit_team_window.team)\n self.update_ui()\n #print(\"Team Editor Saved\")\n else:\n #print(\"Team Editor Cancelled\")\n self.update_ui()\n else:\n self.warn(\"No League Selected\", \"You must select a league before editing it.\")", "def edit_workflow_command():\n return Command().command(_edit_workflow).require_clean().with_database(write=True).with_commit()", "def create_workspace(workspace_name, auth_domain_name, project=\"anvil-datastorage\"):\n\n # check if workspace already exists\n ws_exists, ws_exists_response = check_workspace_exists(workspace_name, project)\n\n if ws_exists is None:\n return False, ws_exists_response\n\n if not ws_exists: # workspace doesn't exist (404), create workspace\n # create request JSON\n create_ws_json = make_create_workspace_request(workspace_name, auth_domain_name, project) # json for API request\n\n # request URL for createWorkspace\n uri = f\"https://api.firecloud.org/api/workspaces\"\n\n # Get access token and and add to headers for requests.\n # -H \"accept: application/json\" -H \"Authorization: Bearer [token] -H \"Content-Type: application/json\"\n headers = {\"Authorization\": \"Bearer \" + get_access_token(), \"accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n\n # capture response from API and parse out status code\n response = requests.post(uri, headers=headers, data=json.dumps(create_ws_json))\n status_code = response.status_code\n\n if status_code != 201: # ws creation fail\n print(f\"WARNING: Failed to create workspace with name: {workspace_name}. Check output file for error details.\")\n return False, response.text\n # workspace creation success\n print(f\"Successfully created workspace with name: {workspace_name}.\")\n return True, None\n\n # workspace already exists\n print(f\"Workspace already exists with name: {project}/{workspace_name}.\")\n print(f\"Existing workspace details: {json.dumps(json.loads(ws_exists_response), indent=2)}\")\n # make user decide if they want to update/overwrite existing workspace\n while True: # try until user inputs valid response\n update_existing_ws = input(\"Would you like to continue modifying the existing workspace? 
(Y/N)\" + \"\\n\")\n if update_existing_ws.upper() in [\"Y\", \"N\"]:\n break\n else:\n print(\"Not a valid option. Choose: Y/N\")\n if update_existing_ws.upper() == \"N\": # don't overwrite existing workspace\n deny_overwrite_message = f\"{project}/{workspace_name} already exists. User selected not to overwrite. Try again with unique workspace name.\"\n return None, deny_overwrite_message\n\n accept_overwrite_message = f\"{project}/{workspace_name} already exists. User selected to overwrite.\"\n return True, accept_overwrite_message # overwrite existing workspace - 200 status code for \"Y\"", "def edit_org(org_id):\n settings = Organisation.query.filter_by(id=org_id).first_or_404()\n form = OrganisationForm(obj=settings)\n \n if request.method == 'POST':\n form.populate_obj(settings)\n db.session.add(settings)\n db.session.commit()\n flash('Settings successfully edited', 'success')\n return redirect(url_for('admin.frontend_dashboard'))\n return render_template('admin/organisations/edit_org.html', form=form)", "def set_workspace(self, ws):\n if len(ws) == 0:\n self._g.set_workspace(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n else:\n if len(ws) == 4:\n self._g.set_workspace(ws[0], ws[1], 0.0, ws[2], ws[3], 0.0)\n else:\n if len(ws) == 6:\n self._g.set_workspace(ws[0], ws[1], ws[2], ws[3], ws[4], ws[5])\n else:\n raise MoveItCommanderException(\n \"Expected 0, 4 or 6 values in list specifying workspace\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a list of Order Requests within this specific Workspace,
def getOrderRequests( self, count=UNSPECIFIED, name=UNSPECIFIED, status=UNSPECIFIED, tag_id=UNSPECIFIED, extraParams={} ): import labstep.entities.orderRequest.repository as orderRequestRepository extraParams = {"group_id": self.id, **extraParams} return orderRequestRepository.getOrderRequests( self.__user__, count=count, search_query=name, status=status, tag_id=tag_id, extraParams=extraParams, )
[ "def get_requests(self):\n return self._make_request('GET', '/requests')", "def open_orders():\n return _make_request('orders/own', private=True)['orders']", "def list_orders(self):\n return self._orders", "def get_orders(client, sheet_id):\r\n return client.get(f'/api/orders/sheet/{sheet_id}')", "async def futures_get_all_orders(self, **params):\r\n return await self.client_helper(\"futures_get_all_orders\", **params)", "def get_orders(self, status_id=None, start=None, limit=None):\n payload = {}\n if status_id is not None:\n payload['status_id'] = status_id\n if start is not None:\n payload['start'] = start\n if limit is not None:\n payload['limit'] = limit\n result = self.get(cc_urls['orders'], payload)\n return result['orders']", "def get_all_orders(self):\n return Order.objects.all()", "def open_orders(self):\n return self.get_qs().filter(~Q(status='C'))", "def getOrders(self):\n return Orders.StoreOrders().getOrdersForCustomer(self.customerId)", "def load_requests(self) -> List[Request]:\n with self._lock:\n return [v['request'] for v in self._requests.values()]", "def get_all_orders():\n\temail = get_jwt_identity()\n\tapprover = Approver.query.filter_by(email=email).first()\n\tadmin = Admin.query.filter_by(email=email).first()\n\n\tag_relation = []\n\troom_relation = []\n\tif not admin: # Get approves requests relations only for the logged in approver.\n\t\t# Get a list of all the orders of access groups this approver is responsible for\n\t\tag_relation = ApprovesAgRequest.query \\\n\t\t\t.filter_by(approver_id=approver.id) \\\n\t\t\t.join(AccessGroupRequest, AccessGroupRequest.id == ApprovesAgRequest.ag_request_id) \\\n\t\t\t.join(Reader, Reader.id == AccessGroupRequest.reader_id) \\\n\t\t\t.join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id).all()\n\n\t\t# Get a list of all the orders of rooms this approver is responsible for\n\t\troom_relation = ApprovesRoomRequest.query \\\n\t\t\t.filter_by(approver_id=approver.id) \\\n\t\t\t.join(RoomRequest, RoomRequest.id == ApprovesRoomRequest.room_request_id) \\\n\t\t\t.join(Reader, Reader.id == RoomRequest.reader_id) \\\n\t\t\t.join(Room, Room.id == RoomRequest.room_id).all()\n\n\telse: # Get approves requests relations only for all approvers.\n\t\t# Get a list of all the orders of access groups of all responsible approvers.\n\t\tag_relation = ApprovesAgRequest.query \\\n\t\t\t.join(AccessGroupRequest, AccessGroupRequest.id == ApprovesAgRequest.ag_request_id) \\\n\t\t\t.join(Reader, Reader.id == AccessGroupRequest.reader_id) \\\n\t\t\t.join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id).all()\n\n\t\t# Get a list of all the orders of rooms this approver is responsible for.\n\t\troom_relation = ApprovesRoomRequest.query \\\n\t\t\t.join(RoomRequest, RoomRequest.id == ApprovesRoomRequest.room_request_id) \\\n\t\t\t.join(Reader, Reader.id == RoomRequest.reader_id) \\\n\t\t\t.join(Room, Room.id == RoomRequest.room_id).all()\n\n\tag_orders = []\n\tfor ag in ag_relation:\n\t\t# Gets all the rooms in the access group\n\t\tag_room_relation = Room.query \\\n\t\t\t.join(CardReader, CardReader.room_b_id == Room.id) \\\n\t\t\t.join(gives_access_to, gives_access_to.c.cr_id == CardReader.id) \\\n\t\t\t.filter_by(ag_id=ag.ag_request.ag.id)\n\t\tjson = {\n\t\t\t\"type\": \"AG\",\n\t\t\t\"rooms\": [room.text_id for room in ag_room_relation],\n\t\t\t\"reader\": {\n\t\t\t\t\"email\": ag.ag_request.reader.email,\n\t\t\t\t\"name\": ag.ag_request.reader.name,\n\t\t\t\t\"surname\": ag.ag_request.reader.surname\n\t\t\t},\n\t\t\t\"approver\": 
{} if not admin else {\n\t\t\t\t\"email\": ag.ag_request.request_approver.approver.email,\n\t\t\t\t\"name\": ag.ag_request.request_approver.approver.name,\n\t\t\t\t\"surname\": ag.ag_request.request_approver.approver.surname\n\t\t\t},\n\t\t\t\"access_name\": ag.ag_request.ag.name,\n\t\t\t\"request_id\": ag.ag_request.id,\n\t\t\t\"ag_id\": ag.ag_request.ag.id,\n\t\t\t\"justification\": ag.ag_request.justification,\n\t\t\t\"requested_datetime\": ag.ag_request.datetime_requested.strftime('%Y-%m-%d')\n\t\t}\n\t\tag_orders.append(json)\n\n\troom_orders = [\n\t\t{\n\t\t\t\"type\": \"Room\",\n\t\t\t\"reader\": {\n\t\t\t\t\"email\": x.room_request.reader.email,\n\t\t\t\t\"name\": x.room_request.reader.name,\n\t\t\t\t\"surname\": x.room_request.reader.surname,\n\t\t\t},\n\t\t\t\"approver\": {} if not admin else {\n\t\t\t\t\"email\": x.room_request.request_approver.approver.email,\n\t\t\t\t\"name\": x.room_request.request_approver.approver.name,\n\t\t\t\t\"surname\": x.room_request.request_approver.approver.surname\n\t\t\t},\n\t\t\t\"access_name\": x.room_request.room.name,\n\t\t\t\"request_id\": x.room_request.id,\n\t\t\t\"room_id\": x.room_request.room.text_id,\n\t\t\t\"justification\": x.room_request.justification,\n\t\t\t\"requested_datetime\": x.room_request.datetime_requested.strftime('%Y-%m-%d')\n\t\t} for x in room_relation]\n\n\treturn ok({\"orders\": room_orders + ag_orders})", "def get_all_user_orders():\n current_user = get_jwt_identity()\n user = user_object.get_user_by_id(current_user)\n if user['admin'] == False:\n return jsonify({'message':'This is an admin route, you are not authorized to access it'}),401\n user = user_object.get_user_by_id(current_user)\n output = []\n placed_orders = parcel_object.get_all_orders()\n for order in placed_orders:\n output.append(order)\n if len(output) == 0:\n return jsonify({'message',\"There are no orders placed yet\"}),404\n return jsonify({'placed orders':output}),200", "def get_order(self):\n url = self._get_link(\"order\")\n if url:\n return self.client.orders.from_url(url)", "def get_queryset(self):\n return OrderQuerySet(self.model, using=self._db)", "def get_requests_list():\n query = db.session.query(\n Request, User.email, Title.title) \\\n .join(User, Request.user_id == User.id) \\\n .join(Title, Request.title_id == Title.id)\n requests = query.all() # If amount of requests supposed to be big, we need to use paging\n return [VMRequest(\n request_id=request.Request.id,\n user_id=request.Request.user_id,\n user_email=request.email,\n title_id=request.Request.title_id,\n title=request.title,\n timestamp=request.Request.timestamp) for request in requests] if requests else []", "def fetch_orders(self, worker=None):\n if not worker:\n worker = self.category\n return db_worker.fetch_orders(worker)", "def iter_requests(self) -> Iterator[Request]:\n with self._lock:\n values = list(self._requests.values())\n\n for v in values:\n yield v['request']", "def get_all_open_orders(self):\n orders = self.client.get_open_orders()\n print(orders)\n for x in range(len(orders)):\n if orders[x]['side'] == 'BUY':\n print('Buy {} price {}'.format(orders[x]['symbol'] ,orders[x]['price']))\n\n else:\n print('Sell {} price {}'.format(orders[x]['symbol'] ,orders[x]['price']))\n\n if orders == []:\n print('there is NO open order!!')\n return orders", "def getRequestsListData(self, request, uh_params, ar_params):\n\n idx = lists.getListIndex(request)\n\n # get the current user\n user_entity = user_logic.getCurrentUser()\n\n # only select the Invites for this user that 
haven't been handled yet\n # pylint: disable=E1103\n filter = {'user': user_entity}\n\n if idx == 0:\n filter['status'] = 'group_accepted'\n params = uh_params\n elif idx == 1:\n filter['status'] = 'new'\n params = ar_params\n else:\n return lists.getErrorResponse(request, \"idx not valid\")\n\n contents = helper.lists.getListData(request, params, filter,\n visibility='public')\n\n return lists.getResponse(request, contents)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the sharelink for the workspace. Returns
def getSharelink(self): import labstep.entities.sharelink.repository as shareLinkRepository return shareLinkRepository.getSharelink(self)
[ "def get_share(self, workspace_id, share_id):\n _op = fresh_operation('get_share')\n _op['method'] = 'GET'\n _op['path'] = '/workspaces/' + str(workspace_id) + '/shares/' + str(\n share_id)\n\n expected = 'Share'\n prepped_request = self._base.prepare_request(_op)\n response = self._base.request(prepped_request, expected, _op)\n\n return response", "def share_mode(self):\n return self._share_mode", "def share_url(network, page_url, **kwargs):\n\tconf = network_conf[network]\n\tshare_url = conf['share_url']\n\tsyntax = conf['syntax']\n\n\treturn url_resolver.get_share_url(share_url, syntax, page_url=page_url, **kwargs)", "def get_sharing_information(self):\n return self.list_item_all_fields.get_sharing_information()", "def getfshare(self, protocol, *sharename, **kwargs):", "def get_network_shared_stories(self):\n\n from .story import Story\n\n network_stories = Story.objects.filter(Q(share_with=self))\n return network_stories", "def getShareMode(self):\r\n return _osgDB.SharedStateManager_getShareMode(self)", "def gen_dir_share_link(token):\n return gen_shared_link(token, 'd')", "def gen_file_share_link(token):\n return gen_shared_link(token, 'f')", "def __get_shared_link(upload_path, mailto):\n\n global STEP_UPLOADDROPBOX_LINK, dbx\n\n try:\n link = dbx.sharing_create_shared_link(upload_path, short_url=False, pending_upload=None)\n return link.url\n\n except dropbox.exceptions.ApiError as sharedLinkError:\n\n if sharedLinkError.error.is_path(): # Evidence in the upload path does not exist\n print(Fore.RED + \" ✖ There is no evidence indicated by the upload path. Please, check the way to get the\"\n \" shared link.\\n\" + Fore.RESET)\n else: # Another error\n print(Fore.RED + \" ✖ Error to create the shared link of the evidence in Dropbox. Exception: \"\n + str(sharedLinkError) + \".\\n\" + Fore.RESET)\n\n # Prints a message or sends an email when an error occurs during the alert protocol\n email.print_error_notification_or_send_email(mailto, STEP_UPLOADDROPBOX_LINK)\n\n sys.exit(1)", "def work_link(work):\n for link_type in work._link:\n if link_type == \"file\" and work._file:\n return \"files/\" + work._file\n if link_type == \"link\" and hasattr(work, \"_url\") and work._url:\n return work._url\n if link_type == \"scholar\" and hasattr(work, \"_scholar\"):\n return work._scholar\n return None", "def get_vm_share(self, vm_id):\n if self.config:\n try:\n return self.config.get(vm_id, \"share\")\n except Exception, why:\n self._error_parse(why)\n return None\n else:\n self._error_config()\n return None", "def link(self):\n return self._book_dict[\"link\"]", "def get_shared_item(self, shared_url):\n ret = None\n \n if self.id_share_pattern.match(shared_url):\n id_search = self.id_share_pattern.search(shared_url)\n if id_search:\n file_id = id_search.group(5)\n #print('Match:', shared_url, ', fileid:', file_id)\n ret = self.get_shared_item_by_id(file_id)\n else:\n ret = self.client.get_shared_item(shared_url).__dict__\n \n if self.DEBUG:\n print(ret)\n #print('ret:', ret)\n return ret", "def share_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"share_key\")", "def get_shared(doctype, user=None, rights=None, *, filters=None, limit=None):\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tshare_filters = [[right, \"=\", 1] for right in rights]\n\tshare_filters += [[\"share_doctype\", \"=\", doctype]]\n\tif filters:\n\t\tshare_filters += filters\n\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += 
[[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\"share_name\"],\n\t\tfilters=share_filters,\n\t\tor_filters=or_filters,\n\t\torder_by=None,\n\t\tlimit_page_length=limit,\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]", "def get_share(id):\n from db import Share\n cp = Share.query.filter_by(id=id)\n if cp.count() > 0:\n return cp.first()\n return None", "def link_workspace_document(workspace_doc_uid):", "def user_workspace_link_repository(self) -> UserWorkspaceLinkRepository:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new Collection within the Workspace for Experiments or Protocols.
def newCollection(self, name, type="experiment"): import labstep.entities.collection.repository as collectionRepository return collectionRepository.newCollection( self.__user__, name=name, type=type, extraParams={ "group_id": self.id} )
[ "def _create_collection(cls):\n\n col_type = api.EDGE_COLLECTION if isinstance(cls, MetaEdgeBase) else api.DOCUMENT_COLLECTION\n\n col = cls.client.collections.get(cls.__collection_name__)\n if col is None:\n cls.client.collections.create(cls.__collection_name__, type=col_type)\n LOG.info(\"Created collection: %s\", cls)\n\n else:\n # check if type is good\n if col['type'] != col_type:\n raise exc.ArangoException(\n \"An existing collection has the wrong type, solve this manually!\",\n col, cls)\n\n LOG.info(\"Collection in use: %s\", cls)", "def createCollection(**args):\n addCreator(args)\n collectionModel = loadModel('collection')\n return collectionModel.createCollection(**args)", "def addSubCollections(self, names):\n import labstep.entities.collection.repository as collectionRepository\n types = {\"experiment_workflow\": \"experiment\",\n \"protocol_collection\": \"protocol\"}\n return collectionRepository.newCollections(self.__user__, names=names, type=types[self.type], extraParams={'outer_folder_id': self.id})", "def initialize_collections(self):\n COLLECTIONS = self.getCollectionsNames()\n\n try:\n collections_dict = {}\n for collection in COLLECTIONS:\n collections_dict[collection] = self.build_collection(collection)\n return collections_dict\n except Exception as e:\n print(e)", "def collection(self, identifier, other_attributes=None):\n record = self.new_record(\n VOPROV_ENTITY, identifier, None, other_attributes\n )\n record.add_asserted_type(VOPROV['Collection'])\n return record", "def collection(self, name):\n if name in self.collections:\n raise RuntimeError(f\"Collection name has already been used: '{name}'\")\n\n def wrapper(func):\n self.collections[name] = Collection(self.basedir, name, func)\n return func\n return wrapper", "def getDefaultCollection(self):\n \n pass", "def __init__(self, experiments):\n\n # Store experiments in list.\n if isinstance(experiments, list):\n # TODO: Deep copy.\n self.experiments += experiments\n else:\n self.experiments = [experiments]\n\n return", "def generate_data_collection(self):\n\n # NOTE: no xml_content to avoid using unsupported GridFS mock\n self.data = Data(\n template=self.template,\n user_id=\"1\",\n dict_content=None,\n title=\"title\",\n )\n self.data.save()\n\n self.data_structure_1 = CurateDataStructure(\n user=\"1\",\n template=self.template,\n name=\"data_structure_1\",\n data=self.data,\n )\n self.data_structure_1.save()\n\n self.data_structure_2 = CurateDataStructure(\n user=\"1\", template=self.template, name=\"data_structure_2\"\n )\n self.data_structure_2.save()\n\n self.data_structure_3 = CurateDataStructure(\n user=\"2\", template=self.template, name=\"data_structure_3\"\n )\n self.data_structure_3.save()\n\n self.data_collection = [\n self.data_structure_1,\n self.data_structure_2,\n self.data_structure_3,\n self.data,\n ]", "def load(self):\n # Get each document and place in collections list\n loaded_colls = []\n for doc in self._dbcollection.find():\n\n # decode and deserialize data\n collection = jsonpickle.decode(doc['jp_collection'], keys=True)\n\n # Add database id to collection object\n collection.db_id = doc['_id']\n loaded_colls.append(collection)\n if len(loaded_colls) <= 0:\n # Return empty collection\n return [Collection(\"My Collection\")]\n return loaded_colls", "def __init__(self, collection):\n self.collection = collection", "def create_collection(db=None, col='Test', edge=False, vertex=False, from_col=None, to_col=None, graph='Test'):\r\n # Case that an edge is True which requires the 
definition of from and to collections within vertices\r\n if edge and from_col and to_col:\r\n return db.graph(graph).create_edge_definition(\r\n edge_collection=col,\r\n from_vertex_collections=from_col,\r\n to_vertex_collections=to_col\r\n )\r\n # Case that an vertex is True which makes it available to the edge collections\r\n elif vertex:\r\n return db.graph(graph).create_vertex_collection(name=col)\r\n # Case of a standard collection which can be later referenced as edge or\r\n else:\r\n return db.create_collection(name=col)", "async def create(self, entity: ChoreCollection) -> ChoreCollection:\n ref_id_kw = {}\n if entity.ref_id != BAD_REF_ID:\n ref_id_kw[\"ref_id\"] = entity.ref_id.as_int()\n try:\n result = await self._connection.execute(\n insert(self._chore_collection_table).values(\n **ref_id_kw,\n version=entity.version,\n archived=entity.archived,\n created_time=entity.created_time.to_db(),\n last_modified_time=entity.last_modified_time.to_db(),\n archived_time=entity.archived_time.to_db()\n if entity.archived_time\n else None,\n workspace_ref_id=entity.workspace_ref_id.as_int(),\n ),\n )\n except IntegrityError as err:\n raise ChoreCollectionAlreadyExistsError(\n f\"Chore collection for workspace {entity.workspace_ref_id} already exists\",\n ) from err\n entity = entity.assign_ref_id(EntityId(str(result.inserted_primary_key[0])))\n await upsert_events(\n self._connection,\n self._chore_collection_event_table,\n entity,\n )\n return entity", "def add_collection(self, collection):\n self.collections.append(collection)", "def create_collection(href, cls):\n instance = cls(href=href)\n meth = getattr(instance, 'create')\n return type(\n cls.__name__, (SubElementCollection,), {'create': meth})(href, cls)", "def sub_collection(href, cls):\n return type(\n cls.__name__, (CreateCollection,), {})(href, cls)", "def _populatecollections(self):\r\n if not self._resourcedir in self.paths:\r\n return\r\n self.colltypes = defaultdict(set)\r\n alltypes = []\r\n colls = []\r\n for item in self.paths[self._resourcedir].dict[\"Instances\"]:\r\n # Fix for incorrect RDir instances.\r\n if (\r\n not self.typepath.defs.typestring in item\r\n or item[self.typepath.defs.hrefstring] in self.paths\r\n ):\r\n continue\r\n typename = \".\".join(\r\n item[self.typepath.defs.typestring].split(\".\", 2)[:2]\r\n ).split(\"#\")[-1]\r\n _ = [alltypes.append(typename) if not \"Collection\" in typename else None]\r\n _ = [colls.append(typename) if \"Collection\" in typename else None]\r\n member = RisMonolithMemberv100(None, self.is_redfish)\r\n member.popdefs(\r\n typename, item[self.typepath.defs.hrefstring], item[self.etagstr]\r\n )\r\n self.update_member(member=member, init=False)\r\n for coll in colls:\r\n collname = coll.split(\"Collection\")[0].split(\"#\")[-1]\r\n typename = next(\r\n (name for name in alltypes if name.startswith(collname)), None\r\n )\r\n colltype = \".\".join(coll.split(\".\", 2)[:2]).split(\"#\")[-1]\r\n self.colltypes[typename].add(colltype)", "def get_collection(self):\n filename = op.join(op.dirname(__file__), '%s.json' % self.collection)\n collection = json.loads(open(filename).read())\n return collection", "def add_collection(session, collection):\n validate(collection, COLLECTION_SCHEMA)\n collection_obj = Collection(name=collection['id'],\n meta=collection)\n session.add(collection_obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets this workspace as the default workspace for the active user.
def setHome(self): member = WorkspaceMember(self.logged_user_user_group, self.__user__) import labstep.generic.entity.repository as entityRepository return entityRepository.editEntity(member, {"is_home": True})
[ "def user_default(self, user_default: ConfigNodePropertyString):\n\n self._user_default = user_default", "def setWorkspace(self, workspaceName):\n if not self.contextHelper.isAccessibleWorkspaceName(workspaceName):\n raise Exception('Specified workspace not valid for your credentials')\n self.contextHelper.setWorkspace(workspaceName)", "def make_default_settings(self):\n if not os.path.exists(self.settings_dir):\n os.makedirs(self.settings_dir)\n # end\n self._set_settings(self.settings_default)", "def set_defaults(self):\n self._config[\"DEFAULT\"] = Config.Default\n\n if \"User\" not in self._config:\n Debug(self, \".set_defaults(): Creating empty User section\")\n self._config[\"User\"] = {}", "def set_default_site(self, msg, text):\n # store default_site for this user in our postgresql database\n return", "def set_default_tab(self):\n self.default_tab = self.driver.current_window_handle", "def set_default(self, default=None):\r\n self.default = default", "def setdefault(self, name: str, value: Any, isglobal: bool = False):\n flags = self.app.flags if isglobal else self.command.flags\n fdict = self.global_flags if isglobal else self.local_flags\n return ctx_default(flags, fdict, name, value)", "def set_default_portal_role(self):\n\t\tif self.data.get(\"default_portal_role\"):\n\t\t\tfrappe.db.set_single_value(\n\t\t\t\t\"Portal Settings\", \"default_role\", self.data.get(\"default_portal_role\")\n\t\t\t)", "def prompt_for_workspace():\n from getpass import getuser\n try:\n from tkinter import filedialog # start at ~ directory (home for logged in user)\n return filedialog.askdirectory(initialdir='/Users/{}'.format(getuser())) # python 3\n except:\n from tkinter import tkFiledialog # start at ~ directory (home for logged in user)\n return tkFileDialog.askdirectory(initialdir='/Users/{}'.format(getuser())) # python 2 fallback", "def setDefaults(self):\n\t\tself.user = 'hdfs'\n\t\tself.releaseDir = '/usr'\n\t\tself.configLocal = '/etc/sysconfig/hadoop'", "def mark_default(self):\n previous = self.user.default_category()\n previous.default = False\n previous.isDefault = False\n previous.save()\n\n self['default'] = True\n self['isDefault'] = True\n self.save()\n return self", "def set_default_config(self, name):\n self.local.defaults.config = name", "def setDefaultProfile( self, profile ):\n self._defaultProfile = profile", "def setDefault(self,path):\n _exc.checkStatus(self.get(\"TreeSetDefault($)\",path))", "def is_default_team(self, is_default_team):\n\n self._is_default_team = is_default_team", "def default_(self, default_):\n\n self._default_ = default_", "def set_default_directory(self, suggestion):\n if os.path.exists(suggestion):\n if not os.path.isdir(suggestion):\n suggestion = os.path.dirname(suggestion)\n self.mainwindow.default_directory = self.default_directory = suggestion", "def set_default_editor(self, editor):\n self.defaults.editor = editor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a list of a User's Device Categorys across all Workspaces on Labstep,
def getDeviceCategorys( self, count=UNSPECIFIED, search_query=UNSPECIFIED, tag_id=UNSPECIFIED, extraParams={} ): import labstep.entities.deviceCategory.repository as deviceCategoryRepository extraParams = {"group_id": self.id, **extraParams} return deviceCategoryRepository.getDeviceCategorys( self.__user__, count=count, search_query=search_query, tag_id=tag_id, extraParams=extraParams, )
[ "def show_device_groups(login):\n url = login.server + \"/api/running/devices/device-group\"\n headers = {\n 'accept': \"application/vnd.yang.collection+json\",\n 'cache-control': \"no-cache\",\n }\n results = requests.request(\"GET\",\n url, headers=headers,\n auth=(login.username, login.password))\n\n data = results.json()\n for each in data[\"collection\"][\"tailf-ncs:device-group\"]:\n click.echo(each[\"name\"])", "def show_device_group_devices(login, device_group):\n url = login.server + \"/api/running/devices/device-group/\" + device_group\n headers = {\n 'accept': \"application/vnd.yang.data+json\",\n 'cache-control': \"no-cache\",\n }\n results = requests.request(\"GET\",\n url, headers=headers,\n auth=(login.username, login.password))\n\n data = results.json()\n click.echo(\"---- Devices ----\")\n for each in data[\"tailf-ncs:device-group\"][\"device-name\"]:\n click.echo(each)\n if data[\"tailf-ncs:device-group\"][\"device-group\"]:\n click.echo(\"---- Device Groups ----\")\n for each in data[\"tailf-ncs:device-group\"][\"device-group\"]:\n click.echo(each)", "def get_device_list():\n token = get_auth_token() # Get Token\n url = \"https://{}/api/v1/network-device/1/10\".format(DNAC_URL)\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n print_device_list(device_list)", "def network_device_list(host, ticket):\n url = \"https://{}/api/v1/network-device\".format(host)\n headers[\"x-auth-token\"] = ticket\n \n # Make API request and return the response body\n response = requests.request(\"GET\", url, headers=headers, verify=False)\n return response.json()[\"response\"]", "def _get_device_dirs():\n try:\n user_devices = LabConfig().get('DEFAULT', 'user_devices')\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n user_devices = 'user_devices'\n # Split on commas, remove whitespace:\n user_devices = [s.strip() for s in user_devices.split(',')]\n return _get_import_paths(['labscript_devices'] + user_devices)", "def get_network_devices(controller, ticket):\n url = controller + \"network-device\"\n header = {\"content-type\": \"application/json\", \"X-Auth-Token\": ticket}\n response = requests.get(url, headers=header, verify=False)\n r_json = response.json()\n result = r_json[\"response\"]\n pprint.pprint(result)\n devices = dict()\n i = 0\n for r in result:\n n = NetworkDevices(\n hostname=r['hostname'],\n family=r['family'],\n macAddress=r['macAddress'],\n type=r['type'],\n serialNumber=r['serialNumber'],\n id=r['id'],\n platformId=r['platformId'],\n lookup=i,\n )\n i = i + 1\n devices[n.lookup] = n\n\n return devices", "def list_devices():\n out = subprocess.check_output([\"colormgr\", \"get-devices-by-kind\", \"display\"])\n for line in out.decode(\"utf8\").split(\"\\n\"):\n if line.startswith(\"Model:\"):\n print(line.split(\":\")[1].lstrip())", "def getApplicableDevices(self):\r\n params={}\r\n params['custId']='1'\r\n self.applicableDevices=self.restApiDataHandler.getData('applicableDevices', noKey=True, module='deviceInfo', arg=params)", "def list_devices(self, c, boardGroup=None):\n IDs, names = self.deviceLists()\n devices = zip(IDs, names)\n if boardGroup is not None:\n # Make sure this board group exists\n bg = self.getBoardGroup(boardGroup)\n devices = [(id, name) for (id, name) in devices\n if name.startswith(boardGroup)]\n return devices", "def get_device_list(schema):\n def get_key(device):\n return (device[\"type\"], device[\"id\"])\n return 
sorted(schema.graph_inst[\"devices\"], key=get_key)", "def advapi32_GetManagedApplicationCategories(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwReserved\", \"pAppCategory\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def listDevices():\n return Controller().listDevices()", "def get_managed_devices(self):\n\n # Device with (boxid = 1) OR (troubleshootingstatus = 0)\n filter_value = {\n \"value\": [\n {\n \"property\": \"boxid\",\n \"value\": [1],\n \"operator\": \"=\"\n },\n {\n \"property\": \"troubleshootingstatus\",\n \"value\": [0],\n \"operator\": \"=\"\n }\n ],\n \"operator\": \"OR\"\n }\n query = QUERY_FILTER % json.dumps(filter_value)\n\n response = self.request(PATH_MANAGED_DEVICES, query)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n print(\"Unrecognised status for managed device fetch\" + response.status_code)", "def get_devices():\n url = 'https://www.chromium.org/chromium-os/developer-information-for-chrome-os-devices'\n response = requests.get(url)\n response.raise_for_status()\n html = response.text.split('<table id=\"goog-ws-list-table\"')[1].split('</table>')[0]\n html = '<table id=\"goog-ws-list-table\"' + html + '</table>'\n table = ET.XML(html.encode('utf-8'))\n keys = [k.text for k in table[0][0]]\n devices = []\n for row in table[1]:\n device = dict()\n for num, value in enumerate(row):\n device[keys[num]] = None\n if value.text:\n device[keys[num]] = value.text.strip()\n elif list(value)[0].text:\n device[keys[num]] = list(value)[0].text.strip()\n devices.append(device)\n return devices", "def getSpecCategories(self, field):\n pc = getToolByName(self, 'portal_catalog')\n categories = []\n\n for spec in field.getResultsRange():\n service = pc(portal_type='AnalysisService',\n getKeyword=spec['keyword'])[0].getObject()\n if service.getCategoryUID() not in categories:\n categories.append({'UID': service.getCategoryUID(),\n 'Title': service.getCategoryName()})\n return categories", "def list_devices(self):\n pass", "def __send_get_categories(self):\n self.__send_command(CommandsBytes.GET_CATEGORIES)", "def workspacesByProductType(self, *args) -> \"adsk::core::Ptr< adsk::core::WorkspaceList >\" :\n return _core.UserInterface_workspacesByProductType(self, *args)", "def test_get_all_device_group(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the widget based on the dictionary from the data source
def build(self, data_dict): # The widgets are part of every instance self.ques_image.source = data_dict["image"] self.header_label.text = data_dict["header"] # But this content is generated dynamically self.box_container.add_widget(self.get_content(data_dict))
[ "def create_widgets(self):\n\n self.create_label(\"Pick Date\")\n self.create_lbox(40, 15)\n\n self.filtentry = tk.Entry(self.parent)\n self.filtentry.grid(row = 2, column = 0, columnspan = 2, sticky = tk.EW)\n self.fbutt = tk.Button(self.parent, text = 'Filter', command = lambda: None)\n self.fbutt.grid(row = 3, column = 0, columnspan = 2, sticky = tk.EW)\n self.defbutt = tk.Button(self.parent, text = 'Reset', command = lambda: None)\n self.defbutt.grid(row = 4, column = 0, columnspan = 2, sticky = tk.EW)", "def create_query_widget(self):\r\n\r\n\t\tlogger.info(\"-- Create Query Widget --\")\r\n\t\t# Retrieving input parameter names from the excel sheet\r\n\r\n\t\ttry:\r\n\t\t\tself.input_params = get_input_params(self.combo_region.get(), self.combo_tables.get())\r\n\t\t\tlogger.info(\"Input params retreieved : %s\" % (str(self.input_params), ))\r\n\t\texcept Exception, e:\r\n\t\t\tlogger.error(\"Error creating widgets. Error - %s\" % (str(e), ))\r\n\t\t\tself.statusbar_status['value'] = \"Error - check logs.\"\r\n\r\n\t\t# Checks if the query frame exists\r\n\t\tif self.query_frame.winfo_exists():\r\n\t\t\tself.query_frame.destroy()\r\n\t\t# Creating a frame at run-time and updating its widgets\r\n\t\t# Creates a new frame\r\n\t\t\r\n\t\tself.query_frame = ttk.Frame(self.query_canvas, height=2, width=2, padding=(5, 3, 5, 5))\r\n\t\tself.query_canvas.create_window((4,4), window=self.query_frame, anchor=\"nw\")\r\n\t\tself.query_frame.bind(\"<Configure>\", lambda event, canvas=self.query_canvas : self.onFrameConfigure(canvas))\r\n\r\n\t\treturn 0", "def create_widget(parent, control_name, control_value, trait,\n label_class=None, user_data=None):\n # Get the inner trait: expect only one inner trait\n # note: trait.inner_traits might be a method (ListInt) or a tuple\n # (List), whereas trait.handler.inner_trait is always a method\n if len(trait.handler.inner_traits()) != 2:\n raise Exception(\n \"Expect two inner traits in Dict control. 
Trait '{0}' \"\n \"inner traits are '{1}'.\".format(\n control_name, trait.inner_traits))\n inner_trait = trait.handler.inner_traits()[1]\n\n # Create the dict widget: a frame\n frame = QtGui.QFrame(parent=parent)\n frame.setFrameShape(QtGui.QFrame.StyledPanel)\n frame.user_data = user_data\n\n # Create tools to interact with the dict widget: expand or collapse -\n # add a dict item - remove a dict item\n tool_widget = QtGui.QWidget(parent)\n layout = QtGui.QHBoxLayout()\n layout.addStretch(1)\n tool_widget.setLayout(layout)\n # Create the tool buttons\n resize_button = QtGui.QToolButton()\n add_button = QtGui.QToolButton()\n layout.addWidget(resize_button)\n layout.addWidget(add_button)\n # Set the tool icons\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\":/soma_widgets_icons/add\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n add_button.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(\n QtGui.QPixmap(_fromUtf8(\":/soma_widgets_icons/nav_down\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n resize_button.setIcon(icon)\n resize_button.setFixedSize(30, 22)\n add_button.setFixedSize(30, 22)\n\n # Create a new controller that contains length 'control_value' inner\n # trait elements\n controller = DictController()\n for name, inner_control_values in six.iteritems(control_value):\n controller.add_trait(str(name), inner_trait)\n setattr(controller, str(name), inner_control_values)\n\n # Create the associated controller widget\n controller_widget = ControllerWidget(controller, parent=frame,\n live=True, editable_labels=True,\n user_data=user_data)\n\n # Store some parameters in the dict widget\n frame.inner_trait = inner_trait\n frame.trait = trait\n frame.controller = controller\n frame.controller_widget = controller_widget\n frame.connected = False\n\n # Add the dict controller widget to the dict widget\n frame.setLayout(controller_widget.layout())\n\n # Set some callback on the dict control tools\n # Resize callback\n resize_hook = partial(\n DictControlWidget.expand_or_collapse, weak_proxy(frame),\n weak_proxy(resize_button))\n resize_button.clicked.connect(resize_hook)\n # Add dict item callback\n add_hook = partial(\n DictControlWidget.add_dict_item, parent, control_name, frame)\n add_button.clicked.connect(add_hook)\n\n # Create the label associated with the dict widget\n control_label = trait.label\n if control_label is None:\n control_label = control_name\n if label_class is None:\n label_class = QtGui.QLabel\n if control_label is not None:\n label = label_class(control_label, parent)\n else:\n label = None\n\n controller_widget.main_controller_def = (DictControlWidget, parent,\n control_name, frame)\n return (frame, (label, tool_widget))", "def _populateWidgets(self):\n self.logger.debug(\"Entering populateWidgets\")\n\n # Populate Audio device drop-down boxes\n self.inputDevices.clear()\n self.outputDevices.clear()\n for audio_device in self.audio_devices:\n if audio_device.input_channels > 0:\n self.inputDevices.addItem(audio_device.name, audio_device.index)\n if audio_device.output_channels > 0:\n self.outputDevices.addItem(audio_device.name, audio_device.index)\n\n # Populate Excitation Signals\n self.signalType.clear()\n self.signalType.addItem(\"Inverse Repeat Sequence\")\n self.signalType.addItem(\"Maximum Length Sequence\")\n self.signalType.addItem(\"Low Pass Swept Sine\")\n self.signalType.addItem(\"Swept Sine\")\n\n # Populate Filters\n self.filterType.clear()\n self.filterType.addItem(\"Disabled\")\n self.filterType.addItem(\"Low Pass Filter\")\n 
self.filterType.addItem(\"High Pass Filter\")\n self.filterType.addItem(\"Bandpass Filter\")", "def build(self):\n\n # The Main Sizer for the Panel.\n panelSizer = wx.BoxSizer(wx.VERTICAL)\n # Pass the outermost Parts and the container to the OrderedDict Parser.\n self.parseContainer(self.form[\"Parts\"], panelSizer)\n self.SetSizerAndFit(panelSizer)", "def buildUISizer(self):\n flagsR = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.AddSpacer(20)\n # Information rows\n for item in CHART_LABEL_LIST:\n sizer.Add(wx.StaticText(self, -1, item))\n datalabel = wx.StaticText(self, -1, '--')\n self.valueDispList.append(datalabel)\n sizer.Add(datalabel, flag=flagsR, border=2)\n # Control button rows.\n self.pauseBt = wx.Button(self, label='Pause ||', style=wx.BU_LEFT, size=(80, 23))\n self.pauseBt.Bind(wx.EVT_BUTTON, self.pauseUpdate)\n sizer.Add(self.pauseBt, flag=flagsR, border=2)\n self.detailBt = wx.Button(self, label='Detail >>', style=wx.BU_LEFT, size=(80, 23))\n self.detailBt.Bind(wx.EVT_BUTTON, self.showDetail)\n sizer.Add(self.detailBt, flag=flagsR, border=2)\n return sizer", "def _populateEntries(self):\n\n widgets = self.setting.getWidgetList()\n\n # we only need the list of names\n names = list(widgets.keys())\n names.sort()\n\n utils.populateCombo(self, names)", "def create_widgets(self):\n # create instruction label\n Label(self, text=\"Enter information for a new story. Update story after each entry\").grid(row=0, column=0, columnspan=2, sticky=W)\n \n # create a label and text entry for the name of a person\n Label(self, text=\"Name 1:\").grid(row=1, column=0, sticky=W)\n self.name1_ent=Entry(self)\n self.name1_ent.grid(row=1, column=1, sticky=W)\n \n # create a label and text entry for a place\n Label(self, text= \"Name 2:\").grid(row=2, column=0, sticky=W)\n self.name2_ent=Entry(self)\n self.name2_ent.grid(row=2, column=1, sticky=W)\n \n # create the building variable\n self.building=StringVar()\n self.building.set(None)\n \n # create a building label\n Label(self, text=\"Name of building:\").grid(row=3, column=0, sticky=W)\n \n # create a radiobutton option for the bank\n Radiobutton(self, text=\"Bank\", variable=self.building, value=\"the bank\").grid(row=3, column=1, sticky=W)\n \n # create a radiobutton option for City Hall\n Radiobutton(self, text=\"City Hall\", variable=self.building, value=\"City Hall\").grid(row=3, column=2, sticky=W)\n \n # create a radiobutton option for the police station\n Radiobutton(self, text=\"Police Station\", variable=self.building, value=\"the police station\").grid(row=3, column=3, sticky=W)\n \n # create a label and text entry for the first verb\n Label(self, text=\"Plural noun:\").grid(row=4, column=0, sticky=W)\n self.plural_noun_ent=Entry(self)\n self.plural_noun_ent.grid(row=4, column=1, sticky=W)\n \n Label(self, text=\"Name:\").grid(row=5, column=0, sticky=W)\n self.name3_ent=Entry(self)\n self.name3_ent.grid(row=5, column=1, sticky=W)\n \n Label(self, text=\"Noun:\").grid(row=6, column=0, sticky=W)\n self.noun1_ent=Entry(self)\n self.noun1_ent.grid(row=6, column=1, sticky=W)\n \n Label(self, text=\"Adjective(s):\").grid(row=7, column=0, sticky=W)\n \n self.is_dark=BooleanVar()\n Checkbutton(self, text=\"dark\", variable=self.is_dark).grid(row=7, column=1, sticky=W)\n \n self.is_ominous=BooleanVar()\n Checkbutton(self, text=\"ominous\", variable=self.is_ominous).grid(row=7, column=2, sticky=W)\n \n self.is_sinister=BooleanVar()\n Checkbutton(self, text=\"sinister\", variable=self.is_sinister).grid(row=7, 
column=3, sticky=W)\n \n Label(self, text=\"Verb ending in -ing:\").grid(row=8, column=0, sticky=W)\n self.verb_ing_ent=Entry(self)\n self.verb_ing_ent.grid(row=8, column=1, sticky=W)\n \n Label(self, text=\"Verb ending in -ed:\").grid(row=9, column=0, sticky=W)\n self.verb_ed_ent1=Entry(self)\n self.verb_ed_ent1.grid(row=9, column=1, sticky=W)\n \n Label(self, text=\"Verb ending in -ed:\").grid(row=10, column=0, sticky=W)\n self.verb_ed_ent2=Entry(self)\n self.verb_ed_ent2.grid(row=10, column=1, sticky=W)\n \n Label(self, text=\"Noun:\").grid(row=11, column=0, sticky=W)\n self.noun2_ent=Entry(self)\n self.noun2_ent.grid(row=11, column=1, sticky=W)\n \n Label(self, text=\"Noun:\").grid(row=12, column=0, sticky=W)\n self.noun3_ent=Entry(self)\n self.noun3_ent.grid(row=12, column=1, sticky=W)\n \n Label(self, text=\"Exclamation:\").grid(row=13, column=0, sticky=W)\n self.exclamation_ent=Entry(self)\n self.exclamation_ent.grid(row=13, column=1, sticky=W)\n \n Label(self, text=\"Noun:\").grid(row=14, column=0, sticky=W)\n self.noun4_ent=Entry(self)\n self.noun4_ent.grid(row=14, column=1, sticky=W)\n \n Label(self, text=\"Noun:\").grid(row=15, column=0, sticky=W)\n self.noun5_ent=Entry(self)\n self.noun5_ent.grid(row=15, column=1, sticky=W)\n \n Label(self, text=\"Name of junior:\").grid(row=16, column=0, sticky=W)\n self.name_of_junior_ent=Entry(self)\n self.name_of_junior_ent.grid(row=16, column=1, sticky=W)\n \n button=Button(self, text=\"Click to update story\", command=self.tell_story).grid(row=17, column=0, sticky=W)\n \n self.story_txt=Text(self, width=100, height=15, wrap=WORD)\n self.story_txt.grid(row=18, column=0, columnspan=4)\n \n story=\"Two friends, [name] and [name] were wandering around the Mississippi University \"\n story+=\"for Women on Halloween. As they were walking, they passed [name of building] where they \"\n story+=\"heard weird [plural noun] from inside. [name] decided to go investigate. \"\n story+=\"To get in the building, they climbed through an open [noun]. Inside, the hallway was [adjective(s)]\"\n story+=\" They heard [verb ending in -ing] coming from upstairs. [verb ending in -ed], they climbed the stairs. \"\n story+=\"In the hallway there was a light that [verb ending in -ed] from the ceiling. Suddenly, a [noun] appeared \"\n story+=\"in the [noun] coming from an open door. [exclamation]! [name of building] is haunted! 
\"\n story+=\"Suddenly, a gust of [noun] knocked down the [noun] and revealed [name of junior] trying to scare them.\"\n \n self.story_txt.insert(0.0, story)", "def create_widgets(self): \r\n # create description label\r\n Label(self,\r\n text = \"Choose your favorite movie types\"\r\n ).grid(row = 0, column = 0, sticky = W)\r\n\r\n # create instruction label\r\n Label(self,\r\n text = \"Select all that apply:\"\r\n ).grid(row = 1, column = 0, sticky = W)\r\n \r\n # create Comedy check button\r\n self.likes_comedy = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Comedy\",\r\n variable = self.likes_comedy,\r\n command = self.update_text\r\n ).grid(row = 2, column = 0, sticky = W)\r\n\r\n # create Drama check button\r\n self.likes_drama = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Drama\",\r\n variable = self.likes_drama,\r\n command = self.update_text\r\n ).grid(row = 3, column = 0, sticky = W)\r\n\r\n # create Romance check button\r\n self.likes_romance = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Romance\",\r\n variable = self.likes_romance,\r\n command = self.update_text\r\n ).grid(row = 4, column = 0, sticky = W)\r\n\r\n # create text field to display results\r\n self.results_txt = Text(self, width = 40, height = 5, wrap = WORD)\r\n self.results_txt.grid(row = 5, column = 0, columnspan = 3)", "def widget(self):\n dictionary = {'label': QtWidgets.QLabel(self.label)}\n widget = dictionary['widget'] = QtWidgets.QComboBox()\n for label, choice in zip(self.labels, self.choices):\n widget.addItem(label, choice)\n widget.valueChanged = widget.currentIndexChanged\n # setValue for QComboBox\n def set_data(self, data=None):\n \"\"\"Method for setting the data of the QComboBox\"\"\"\n self.setCurrentIndex(self.findData(data))\n widget.setValue = MethodType(set_data, widget)\n widget.value = widget.currentData\n return dictionary", "def fill_KSK_list_widget(self):\r\n self.list_widget.clear()\r\n search_query = self.search_box.text()\r\n KSK_names, dates = search_for_KSK(search_query)\r\n if self.cb.count() == 0:\r\n self.cb.addItem('Filter by date')\r\n self.cb.addItems({date for date in dates.keys()})\r\n else:\r\n self.cb.setCurrentIndex(0)\r\n self.list_widget.addItems(KSK_names)", "def _build_datatable(self):\n def _get_value_metadata(cfg, data=None):\n \"\"\"Get value from metadata.\"\"\"\n if cfg.get('key'):\n return self.metadata.get(cfg.get('key'))\n\n def _get_value_datastore(cfg, data=None):\n \"\"\"Get value(s) from datastore.\"\"\"\n # jq().transform() returns a list of string(s)\n try:\n res = jq(cfg['jqexpr']).transform(data, multiple_output=True)\n except Exception as e:\n if 'Cannot iterate over null' in str(e):\n res = [np.nan]\n else:\n print('ERROR: Unable to get value from JSON: %s' % e)\n print('ERROR: cfg = %s' % cfg)\n print('ERROR: data = %s' % data)\n exit(1)\n\n # multiply the factor if available\n if 'factor' in cfg:\n res = [x * cfg['factor'] for x in res]\n\n # return the whole list or the only value\n return res if len(res) > 1 else res[0]\n\n def _get_value_auto(cfg, data=None):\n \"\"\"Get value by calculating.\"\"\"\n if cfg['name'] == 'Sample':\n return 'all'\n if cfg['name'] == 'Path':\n value = os.path.join(data['path_lv_1'], data['path_lv_2'])\n return value\n\n def _get_value_unknown(cfg, data=None):\n print('ERROR: Unknown type in \"source\", config = \"%s\".' 
% cfg)\n exit(1)\n\n switch = {\n 'metadata': _get_value_metadata,\n 'datastore': _get_value_datastore,\n 'auto': _get_value_auto,\n }\n\n self.config\n self.datastore\n self.datatable = []\n\n # generate rows for the datatable\n for iterdata in self.datastore:\n # generate one row\n data = {}\n for cfg in self.config.get('columns'):\n # get and set value(s)\n name = cfg.get('name')\n data[name] = switch.get(cfg['source'],\n _get_value_unknown)(cfg, iterdata)\n\n # deal with split if needed\n need_split = False\n if self.config.get('defaults', {}).get('split'):\n # get max number of samples\n max_sample = 1\n for value in data.values():\n if isinstance(value, list) and len(value) > max_sample:\n max_sample = len(value)\n need_split = True if max_sample > 1 else False\n\n if need_split:\n # split into samples\n for index in range(1, max_sample + 1):\n sample_data = {}\n # deal with each column\n for name, value in data.items():\n if isinstance(value, list):\n # get the first value and save the rest\n sample_data[name] = value[0]\n data[name] = value[1:]\n # Set \"WRONG\" flags for user check\n if len(data[name]) == 0:\n data[name] = 'WRONG'\n else:\n sample_data[name] = value\n\n # update related columns\n if 'Sample' in data:\n sample_data['Sample'] = index\n if 'Path' in data:\n sample_data['Path'] = os.path.join(\n data['Path'], 'sample%s' % index)\n\n # save this row (sample) to datatable\n self.datatable.append(sample_data.copy())\n else:\n # no need to split, save directly\n self.datatable.append(data.copy())", "def initialize_widgets(self):\n self.event_selector = models.Select(\n title='Select Competition',\n options=[(evt.key, evt.name + ' | ' + evt.end_date)\n for evt in self.event_data.itertuples()],\n value=self.event)\n\n self.level_selector = models.Select(\n title='Select Competition Level',\n options=[\n ('qm', 'Qualification'),\n ('qf', 'Quarterfinals'),\n ('sf', 'Semifinals'),\n ('f', 'Finals')],\n value=self.level\n )\n\n self.match_selector = models.Select(\n title='Select Match',\n options=self.level_matches,\n value=self.match)\n\n self.time_select_type = models.CheckboxButtonGroup(\n labels=['All', 'Span', 'Range'],\n active=[0])\n\n self.time_range_selector = models.RangeSlider(\n start=0, end=160, step=1,\n value=(0, 150),\n title='Select Time Range in Seconds')\n self.time_range_selector.visible = False\n\n self.time_span_selector = models.Slider(\n start=self.start_time, end=self.end_time, step=1,\n value=15,\n title='Select Time Span End')\n self.time_span_selector.visible = False\n\n self.span_length_spinner = models.Spinner(\n title='Span Length',\n low=5, high=55, step=10, value=15)\n self.span_length_spinner.visible = False", "def construct(self):\n return div.render(self.values)", "def bind_datasource(self, data_source):\n self.model = DataGridModel(data_source,\n self.get_full_path,\n self.decode_fallback)\n self.model.connect('data-loaded', self.on_data_loaded)\n for view in [self.tree_view, self.icon_view]:\n view.model = self.model\n\n liststore_date_cols = Gtk.ListStore(str, str, str)\n if self.model.datetime_columns:\n self.date_start.set_sensitive(True)\n self.date_end.set_sensitive(True)\n\n for column in self.model.datetime_columns:\n liststore_date_cols.append(\n (column['name'], column['display'], column['transform']))\n\n combox_date_cols = self.container.combobox_date_columns\n old_model = combox_date_cols.get_model()\n if old_model:\n del old_model\n combox_date_cols.set_model(liststore_date_cols)\n if not combox_date_cols.get_cells():\n cell = 
Gtk.CellRendererText()\n combox_date_cols.pack_start(cell, True)\n combox_date_cols.add_attribute(cell, 'text', 1)\n combox_date_cols.set_active(0)\n combox_date_cols.connect('changed', self.on_date_change, None)\n\n # Hide date column selection if there can be no choice\n if len(liststore_date_cols) < 2:\n combox_date_cols.hide()\n self.container.date_column_label.hide()\n else:\n # They might have been hidden on a previous bind call.\n combox_date_cols.show()\n self.container.date_column_label.show()\n\n # If the are no date columns, hide the date range controls as well\n widgets = (\n self.container.image_start_date,\n self.container.vbox_start_date,\n self.container.label_date_to,\n self.container.image_end_date,\n self.container.vbox_end_date,\n self.container.filters_separator,\n )\n if len(liststore_date_cols) == 0:\n for widget in widgets:\n widget.hide()\n else:\n combox_date_cols.set_active(0)\n for widget in widgets:\n widget.show()\n\n self._refresh_view()", "def create_widgets(self):\n\t self.insuruction = Label(self, text = \"Entre the passord\")\n\t self.instruction.grid(row = 0, column =0, columnspan =2, sticky = W) # put label left side\n\t \n\t self.password = Entry(self)\n\t self.password.grid(row = 1, column =1, sticky = W)\n\t \n\t self.submit_button = Button(self, text = \" Submit\", command = self.reveal)\n\t self.submit_button.grid(row = 2, column=0, sticky = W)\n\t \n\t self.text = Text(sel, widty = 35, height=5, wray = WORD)\n\t selftext.grid(row =3, column=0, columnspan =2, sticky = W)", "def build():\n return RootWidget()", "def InitUI(self):\n if self.data_type in ['orient', 'ages']:\n belongs_to = []\n else:\n parent_table_name = self.parent_type + \"s\"\n if parent_table_name in self.contribution.tables:\n belongs_to = sorted(self.contribution.tables[parent_table_name].df.index.unique())\n else:\n belongs_to = []\n\n self.choices = {}\n if self.data_type in ['specimens', 'samples', 'sites']:\n self.choices = {1: (belongs_to, False)}\n if self.data_type == 'orient':\n self.choices = {1: (['g', 'b'], False)}\n if self.data_type == 'ages':\n for level in ['specimen', 'sample', 'site', 'location']:\n if level in self.grid.col_labels:\n level_names = []\n if level + \"s\" in self.contribution.tables:\n level_names = list(self.contribution.tables[level+\"s\"].df.index.unique())\n num = self.grid.col_labels.index(level)\n self.choices[num] = (level_names, False)\n # Bind left click to drop-down menu popping out\n self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK,\n lambda event: self.on_left_click(event, self.grid, self.choices))\n\n cols = self.grid.GetNumberCols()\n col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)]\n\n # check if any additional columns have controlled vocabularies\n # if so, get the vocabulary list\n for col_number, label in enumerate(col_labels):\n self.add_drop_down(col_number, label)", "def buildcontent(self):\n self.buildcontainer()\n self.buildjschart()\n self.htmlcontent = self.template_content_nvd3.substitute(container=self.container,\n jschart=self.jschart)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
objective function for lightgbm.
def objective(params): # hyperopt casts as float params['num_boost_round'] = int(params['num_boost_round']) params['num_leaves'] = int(params['num_leaves']) # need to be passed as parameter if self.is_unbalance: params['is_unbalance'] = True params['verbose'] = -1 params['seed'] = 1 if self.with_focal_loss: focal_loss = lambda x,y: focal_loss_lgb(x, y, params['alpha'], params['gamma']) cv_result = lgb.cv( params, train, num_boost_round=params['num_boost_round'], fobj = focal_loss, feval = lgb_focal_f1_score, nfold=3, stratified=True, early_stopping_rounds=20) else: cv_result = lgb.cv( params, train, num_boost_round=params['num_boost_round'], metrics='binary_logloss', feval = lgb_f1_score, nfold=3, stratified=True, early_stopping_rounds=20) self.early_stop_dict[objective.i] = len(cv_result['f1-mean']) score = round(cv_result['f1-mean'][-1], 4) objective.i+=1 return -score
[ "def run_optuna2():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n dataset = dataset.sample(frac=0.02)\n print(dataset.shape)\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n # X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n # dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n # dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n #\"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"boosting_type\": trial.suggest_categorical(\"boosting_type\", ['gbdt', 'rf']),\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n\n model = lgb.train(params, dtrain)\n vd_preds = model.predict(X_test)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. 
Returns max\n accuracy = accuracy_score(y_test, vd_preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def run_optuna():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial,\n \"multi_logloss\")\n model = lgb.train(params, dtrain,\n num_boost_round=1000,\n early_stopping_rounds=30,\n valid_sets=dvalid,\n callbacks=[pruning_callback]\n )\n vd_preds = model.predict(X_test, num_iteration=model.best_iteration)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. 
Returns max\n\n accuracy = accuracy_score(y_test, vd_preds)\n # gbm = lgb.train(param, dtrain)\n # pred_labels = np.rint(preds)\n # rmse = sklearn.metrics.mean_squared_error(valid_y, preds, squared=False)\n #accuracy = accuracy_score(valid_y, preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def train_lgbm(X_train, Y_train,\n categorical_feature=['referer_code', 'is_app', 'agent_id', 'traffic_type', 'action_id', 'reference'],\n model_path=None, n_jobs=3, hyperparameter_tuning=False, num_boost_round=100, folds=3):\n\n print('\\n === train a lightGBM === \\n')\n\n d_train = lgb.Dataset(X_train, label=Y_train,\n # categorical_feature=['aisle_id', 'department_id']\n categorical_feature=categorical_feature,\n )\n\n\n if not hyperparameter_tuning:\n params = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'num_class': 1, # must be 1 for non-multiclass training\n 'metric': 'binary_error',\n #'metric': 'binary_logloss',\n #'n_jobs': n_jobs,\n 'nthread': n_jobs,\n #'num_leaves': 31,\n 'num_leaves': 64,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'reg_alpha': 5,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n #'feature_fraction': 0.9,\n #'bagging_fraction': 0.8,\n #'bagging_freq': 5,\n #'verbose': 0\n }\n\n gbm = lgb.train(params,\n d_train,\n num_boost_round=num_boost_round,\n categorical_feature=categorical_feature)\n\n else:\n params = {'boosting_type': 'gbdt',\n 'max_depth': -1,\n 'objective': 'binary',\n 'nthread': n_jobs, # Updated from nthread\n 'num_leaves': 64,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n 'subsample_for_bin': 200,\n 'subsample': 1,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5,\n 'reg_lambda': 10,\n 'min_split_gain': 0.5,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'num_class': 1,\n 'metric': 'binary_error'}\n\n gridParams = {\n 'learning_rate': [0.005],\n 'n_estimators': [8, 16, 24],\n 'num_leaves': [6, 8, 12, 16],\n 'boosting_type': ['gbdt'],\n 'objective': ['binary'],\n 'random_state': [42], # Updated from 'seed'\n 'colsample_bytree': [0.64, 0.65, 0.66],\n 'subsample': [0.7, 0.75],\n 'reg_alpha': [1, 1.2],\n 'reg_lambda': [1, 1.2, 1.4],\n }\n\n mdl = lgb.LGBMClassifier(boosting_type='gbdt',\n objective='binary',\n n_jobs=n_jobs, # Updated from 'nthread'\n silent=True,\n max_depth=params['max_depth'],\n max_bin=params['max_bin'],\n subsample_for_bin=params['subsample_for_bin'],\n subsample=params['subsample'],\n subsample_freq=params['subsample_freq'],\n min_split_gain=params['min_split_gain'],\n min_child_weight=params['min_child_weight'],\n min_child_samples=params['min_child_samples'],\n scale_pos_weight=params['scale_pos_weight'])\n\n print(mdl.get_params().keys())\n\n grid = RandomizedSearchCV(estimator=mdl, param_distributions=gridParams,\n n_iter=100, cv=folds, verbose=2, random_state=42, n_jobs=n_jobs)\n\n #grid = GridSearchCV(mdl, gridParams, verbose=2, cv=folds, n_jobs=n_jobs)\n grid.fit(X_train, Y_train)\n\n print('best parameters:')\n print(grid.best_params_)\n print('best score: ')\n print(grid.best_score_)\n\n # using parameters already set above, 
replace in the best from the grid search\n params['colsample_bytree'] = grid.best_params_['colsample_bytree']\n params['learning_rate'] = grid.best_params_['learning_rate']\n #params['max_bin'] = grid.best_params_['max_bin']\n params['num_leaves'] = grid.best_params_['num_leaves']\n params['reg_alpha'] = grid.best_params_['reg_alpha']\n params['reg_lambda'] = grid.best_params_['reg_lambda']\n params['subsample'] = grid.best_params_['subsample']\n #params['subsample_for_bin'] = grid.best_params_['subsample_for_bin']\n\n print('Fitting with params: ')\n print(params)\n\n X_train_sub, X_val, Y_train_sub, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=42)\n\n d_train_sub = lgb.Dataset(X_train_sub, label=Y_train_sub,\n # categorical_feature=['aisle_id', 'department_id']\n categorical_feature=categorical_feature,\n #categorical_feature='auto'\n )\n\n d_val_sub = lgb.Dataset(X_val, label=Y_val,\n # categorical_feature=['aisle_id', 'department_id']\n categorical_feature=categorical_feature,\n #categorical_feature='auto'\n )\n\n gbm = lgb.train(params,\n d_train_sub,\n num_boost_round=1000,\n valid_sets=[d_train_sub, d_val_sub],\n early_stopping_rounds=50,\n verbose_eval=4)\n\n # Plot importance\n #lgb.plot_importance(gbm)\n\n if model_path is None:\n model_path = 'lgbm.model'\n if hyperparameter_tuning:\n model_path = 'lgbm.ht.model'\n\n # save model to file\n gbm.save_model(model_path)\n print('save the lightGBM model to {}'.format(model_path))\n\n # load model to predict\n # print('Load model to predict')\n # bst = lgb.Booster(model_file='model.txt')\n # can only predict with the best iteration (or the saving iteration)\n # y_pred = bst.predict(X_test)\n\n return gbm, model_path", "def objective(self,data):\r\n F = -0.5*self.lbda*(np.sum(self.U*self.U)+np.sum(self.V*self.V))\r\n for i in xrange(len(self.U)):\r\n f = self.precompute_f(data,i)\r\n for j in f:\r\n F += log(g(f[j]))\r\n for k in f:\r\n F += log(1-g(f[k]-f[j]))\r\n return F", "def test_objective(self):\n for objective in self._objectives:\n with self.subTest(X=_X, objective=objective):\n regressor = LGBMRegressor(objective=objective, num_thread=1)\n regressor.fit(_X, _Y)\n regressor_onnx: ModelProto = convert_lightgbm(\n regressor,\n initial_types=self._calc_initial_types(_X),\n target_opset=TARGET_OPSET,\n )\n y_pred = regressor.predict(_X)\n y_pred_onnx = self._predict_with_onnx(regressor_onnx, _X)\n self._assert_almost_equal(\n y_pred,\n y_pred_onnx,\n decimal=_N_DECIMALS,\n frac=_FRAC,\n )", "def _set_lgb_parameters(\n X: np.ndarray,\n y: np.ndarray,\n objective: str,\n rf: bool,\n silent: bool,\n n_jobs: int = 0,\n lgbm_params: dict = None,\n) -> dict:\n\n n_feat = X.shape[1]\n\n params = lgbm_params if lgbm_params is not None else {}\n\n params[\"objective\"] = objective\n params[\"verbosity\"] = -1\n if objective == \"softmax\":\n params[\"num_class\"] = len(np.unique(y))\n\n if rf:\n feat_frac = (\n np.sqrt(n_feat) / n_feat\n if objective in [\"softmax\", \"binary\"]\n else n_feat / (3 * n_feat)\n )\n params.update(\n {\n \"boosting_type\": \"rf\",\n \"bagging_fraction\": 0.7,\n \"feature_fraction\": feat_frac,\n \"bagging_freq\": 1,\n }\n )\n\n clf_losses = [\n \"binary\",\n \"softmax\",\n \"multi_logloss\",\n \"multiclassova\",\n \"multiclass\",\n \"multiclass_ova\",\n \"ova\",\n \"ovr\",\n \"binary_logloss\",\n ]\n if objective in clf_losses:\n y = y.astype(int)\n y_freq_table = pd.Series(y.fillna(0)).value_counts(normalize=True)\n n_classes = y_freq_table.size\n if n_classes > 2 and objective != 
\"softmax\":\n params[\"objective\"] = \"softmax\"\n params[\"num_class\"] = len(np.unique(y))\n if not silent:\n print(\"Multi-class task, setting objective to softmax\")\n main_class = y_freq_table[0]\n if not silent:\n print(\"GrootCV: classification with unbalance classes\")\n if main_class > 0.8:\n params.update({\"is_unbalance\": True})\n\n params.update({\"num_threads\": n_jobs})\n\n # we are using early_stopping\n # we prevent the overridding of it by popping the n_iterations\n keys_to_pop = [\n \"num_iterations\",\n \"num_iteration\",\n \"n_iter\",\n \"num_tree\",\n \"num_trees\",\n \"num_round\",\n \"num_rounds\",\n \"nrounds\",\n \"num_boost_round\",\n \"n_estimators\",\n \"max_iter\",\n ]\n for key in keys_to_pop:\n params.pop(key, None)\n\n return params", "def _objective_fn(\n pair: gaussian_mixture_pair.GaussianMixturePair,\n obs0: Observations,\n obs1: Observations,\n ) -> jnp.ndarray:\n q0 = get_q(gmm=pair.gmm0, obs=obs0)\n q1 = get_q(gmm=pair.gmm1, obs=obs1)\n cost_matrix = pair.get_cost_matrix()\n sinkhorn_output = pair.get_sinkhorn(cost_matrix=cost_matrix)\n transport_penalty = sinkhorn_output.reg_ot_cost\n return q0 + q1 - weight_transport * transport_penalty", "def __objective_fcn(self, y_true, y_pred, **kwargs):\n obj1 = kwargs['P'](y_true,y_pred) #objective 1\n obj2 = kwargs['ratio_selected_features'] #is objective 2\n \n particle_value = self.obj_function_equation(obj1,obj2, kwargs['alpha'])\n \n return particle_value", "def _create_m_objective(w, X):\n clusters, cells = w.shape\n genes = X.shape[0]\n w_sum = w.sum(1)\n def objective(m):\n m = m.reshape((X.shape[0], w.shape[0]))\n d = m.dot(w)+eps\n temp = X/d\n w2 = w.dot(temp.T)\n deriv = w_sum - w2.T\n return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes\n return objective", "def metric_lgbm(self):\n name = self.metric_name()\n scorer = sklearn.metrics.get_scorer(name)\n _func = scorer._score_func\n _sign = scorer._sign == 1\n\n def _f(y_true, y_score):\n y_pred = y_score.reshape(-1, y_true.shape[0]).argmax(0)\n return name, _func(y_true, y_pred), _sign\n\n return _f", "def get_objective_value(self):\n raise NotImplementedError()", "def _objective_function(ndim, voxel_size, sigma_z, sigma_yx, amplitude):\n # define objective gaussian function\n if ndim == 3:\n f = _objective_function_3d(\n voxel_size_z=voxel_size[0],\n voxel_size_yx=voxel_size[-1],\n sigma_z=sigma_z,\n sigma_yx=sigma_yx,\n amplitude=amplitude)\n else:\n f = _objective_function_2d(\n voxel_size_yx=voxel_size[-1],\n sigma_yx=sigma_yx,\n amplitude=amplitude)\n\n return f", "def minimise_objective_function_BFGS(self):\r\n result = scipy.optimize.minimize(fun=self.objective_function,\r\n jac=self.gradient,\r\n method=\"BFGS\")\r\n self.best_guess = result.x", "def nnObjFunction(params, *args):\n\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\n\n w1 = params[0:n_hidden * (n_input + 1)].reshape((n_hidden, (n_input + 1)))\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\n obj_val = 0\n\n a = np.array([])\n z = np.array([])\n b = np.array([])\n o = np.array([])\n JW1W2 = 0\n JW1 = np.array([])\n JW2 = np.array([])", "def _init_objective(self) -> None:\n raise NotImplementedError(\"You should implement this!\")", "def add_minimize(self, co, var):", "def lobpcg(A, X, B=..., M=..., Y=..., tol=..., maxiter=..., largest=..., verbosityLevel=..., retLambdaHistory=..., retResidualNormsHistory=...):\n ...", "def _create_w_objective(m, X):\n genes, clusters = m.shape\n cells = X.shape[1]\n m_sum = 
m.sum(0)\n def objective(w):\n # convert w into a matrix first... because it's a vector for\n # optimization purposes\n w = w.reshape((m.shape[1], X.shape[1]))\n d = m.dot(w)+eps\n # derivative of objective wrt all elements of w\n # for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus \n # x_ij\n temp = X/d\n m2 = m.T.dot(temp)\n deriv = m_sum.reshape((clusters, 1)) - m2\n return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes\n return objective", "def objective(par_arr):\n fit_params = copy.copy(params)\n for par, value in zip([p for p in params if params[p].vary], par_arr):\n fit_params[par].value = value\n return self.log_likelihood(fit_params, eval_gradient=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
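The document field in this row is a hyperopt objective that cross-validates a LightGBM model and returns the negative F1 score (hyperopt minimizes). A condensed, self-contained sketch of the same pattern follows; the synthetic data, the search space, and the pre-4.0 LightGBM call style (early_stopping_rounds, 'f1-mean' result key) are assumptions chosen to match the row — newer LightGBM versions would pass callbacks=[lgb.early_stopping(20)] and read a 'valid f1-mean' key instead.

    import numpy as np
    import lightgbm as lgb
    from hyperopt import fmin, tpe, hp, Trials
    from sklearn.metrics import f1_score

    # toy binary-classification data standing in for the row's `train` Dataset
    X = np.random.rand(500, 10)
    y = np.random.randint(0, 2, 500)
    train = lgb.Dataset(X, label=y)

    def lgb_f1(preds, data):
        # custom eval metric: returns (name, value, is_higher_better)
        y_true = data.get_label()
        return "f1", f1_score(y_true, (preds > 0.5).astype(int)), True

    def objective(params):
        # hyperopt samples floats, so integer hyperparameters need casting
        num_boost_round = int(params.pop("num_boost_round"))
        params["num_leaves"] = int(params["num_leaves"])
        params.update({"objective": "binary", "verbose": -1, "seed": 1})
        cv_result = lgb.cv(params, train, num_boost_round=num_boost_round,
                           feval=lgb_f1, nfold=3, stratified=True,
                           early_stopping_rounds=20)
        return -round(cv_result["f1-mean"][-1], 4)  # minimize negative F1

    space = {
        "num_boost_round": hp.quniform("num_boost_round", 50, 500, 25),
        "num_leaves": hp.quniform("num_leaves", 20, 200, 10),
        "learning_rate": hp.uniform("learning_rate", 0.01, 0.3),
    }
    # best = fmin(objective, space, algo=tpe.suggest, max_evals=50, trials=Trials())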
Waits to verify the bucket reflects the encryption settings
def wait_for_update(bucket, key_arn): response = client.get_bucket_encryption(Bucket=bucket) failure_counter = 0 while not 'ServerSideEncryptionConfiguration' in response and \ 'Rules' in response['ServerSideEncryptionConfiguration'] and \ 'ApplyServerSideEncryptionByDefault' in response['ServerSideEncryptionConfiguration']['Rules'][0] and \ 'KMSMasterKeyID' in response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault'] and \ key_arn == response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID']: if failure_counter > 5: print("Bucket not reflecting encryption update, aborting") sys.exit(1) failure_counter += 1 time.sleep(10)
[ "def aws_s3_bucket_encryption_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n s3 = session.client(\"s3\")\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for buckets in list_buckets(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(buckets,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n bucketName = buckets[\"Name\"]\n s3Arn = f\"arn:{awsPartition}:s3:::{bucketName}\"\n try:\n response = s3.get_bucket_encryption(Bucket=bucketName)\n for rules in response[\"ServerSideEncryptionConfiguration\"][\"Rules\"]:\n sseType = str(\n rules[\"ApplyServerSideEncryptionByDefault\"][\"SSEAlgorithm\"]\n )\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-encryption-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.1] Amazon S3 buckets should be encrypted\",\n \"Description\": f\"Amazon S3 bucket \"\n + bucketName\n + \" is encrypted using \"\n + sseType\n + \".\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Encryption and how to configure it refer to the Amazon S3 Default Encryption for S3 buckets section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-1\",\n \"NIST SP 800-53 Rev. 4 MP-8\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"NIST SP 800-53 Rev. 
4 SC-28\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"CIS Amazon Web Services Foundations Benchmark V1.5 2.1.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n except Exception as e:\n if (\n str(e)\n == \"An error occurred (ServerSideEncryptionConfigurationNotFoundError) when calling the GetBucketEncryption operation: The server side encryption configuration was not found\"\n ):\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-encryption-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.1] Amazon S3 buckets should be encrypted\",\n \"Description\": f\"Amazon S3 bucket \"\n + bucketName\n + \" is not encrypted. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Encryption and how to configure it refer to the Amazon S3 Default Encryption for S3 buckets section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-1\",\n \"NIST SP 800-53 Rev. 4 MP-8\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"NIST SP 800-53 Rev. 
4 SC-28\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"CIS Amazon Web Services Foundations Benchmark V1.5 2.1.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n print(e)", "def test_get_bucket_success(self):\n bucket = self.cm.get_bucket(\"testVaultName\")\n self.assertEqual(bucket.name, \"testVaultName\")\n self.assertEqual(bucket.id, 274)", "def test_get_cortx_s3_access_key_success():\n config = CORTXS3Config(use_cipher = False)\n config._config['cortx_s3']['background_account_secret_key'] = \"uw13JTMmOFzqz86eaMSbJAFd1CCB7oujkAXX4r+A\"\n s3_secret_key = config.get_cortx_s3_secret_key()\n assert s3_secret_key == \"uw13JTMmOFzqz86eaMSbJAFd1CCB7oujkAXX4r+A\"", "def test_get_bucket_versioning_config_enabled(self):\n query_factory = mock_query_factory(payload.sample_s3_get_bucket_versioning_enabled_result)\n def check_query_args(passthrough):\n self.assertEqual(query_factory.credentials.access_key, \"foo\")\n self.assertEqual(query_factory.credentials.secret_key, \"bar\")\n self.assertEqual(\n RequestDetails(\n service=b\"s3\",\n region=REGION_US_EAST_1,\n method=b\"GET\",\n url_context=client.s3_url_context(self.endpoint, \"mybucket\", \"?versioning\"),\n content_sha256=EMPTY_CONTENT_SHA256,\n ),\n query_factory.details,\n )\n return passthrough\n\n def check_results(versioning_config):\n self.assertEquals(versioning_config.status, 'Enabled')\n\n creds = AWSCredentials(\"foo\", \"bar\")\n s3 = client.S3Client(creds, query_factory=query_factory)\n d = s3.get_bucket_versioning_config(\"mybucket\")\n d.addCallback(check_query_args)\n d.addCallback(check_results)\n return d", "def test_get_cortx_s3_access_key_success():\n config = CORTXS3Config(use_cipher = False)\n config._config['cortx_s3']['background_account_access_key'] = \"S_YU-hMoQH2BWtza2tLtVg\"\n s3_access_key = config.get_cortx_s3_access_key()\n assert s3_access_key == \"S_YU-hMoQH2BWtza2tLtVg\"", "def aws_s3_bucket_versioning_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n s3 = session.client(\"s3\")\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for buckets in list_buckets(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(buckets,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n bucketName = buckets[\"Name\"]\n s3Arn = f\"arn:{awsPartition}:s3:::{bucketName}\"\n try:\n ver = s3.get_bucket_versioning(Bucket=bucketName)\n if \"Status\" in ver:\n if ver[\"Status\"] == \"Enabled\":\n bucketVersioned = True\n else:\n bucketVersioned = False\n else:\n bucketVersioned = False\n except ClientError or KeyError:\n bucketVersioned = False\n \n if bucketVersioned is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-versioning-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.3] Amazon S3 buckets should have versioning enabled\",\n \"Description\": f\"Amazon S3 bucket \"\n + bucketName\n + \" has versioning enabled. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Versioning and how to configure it refer to the Using Versioning section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.IP-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-4\",\n \"NIST SP 800-53 Rev. 4 CP-6\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-9\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-6\",\n \"AICPA TSC A1.2\",\n \"AICPA TSC A1.3\",\n \"AICPA TSC CC3.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.1.3\",\n \"ISO 27001:2013 A.17.2.1\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-versioning-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.3] Amazon S3 buckets should have versioning enabled\",\n \"Description\": f\"Amazon S3 bucket \"\n + bucketName\n + \" does not have versioning enabled. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Versioning and how to configure it refer to the Using Versioning section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.IP-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 
4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-4\",\n \"NIST SP 800-53 Rev. 4 CP-6\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-9\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-6\",\n \"AICPA TSC A1.2\",\n \"AICPA TSC A1.3\",\n \"AICPA TSC CC3.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.1.3\",\n \"ISO 27001:2013 A.17.2.1\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\",\n }\n yield finding", "def test_buckets_access_authorized(self):\n self.client.login(username='user', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, 'bucket start')", "def aws_s3_bucket_policy_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n s3 = session.client(\"s3\")\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for buckets in list_buckets(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(buckets,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n bucketName = buckets[\"Name\"]\n s3Arn = f\"arn:{awsPartition}:s3:::{bucketName}\"\n # Check to see if there is a policy at all\n try:\n s3.get_bucket_policy(Bucket=bucketName)\n bucketHasPolicy = True\n except ClientError:\n bucketHasPolicy = False\n # this is a failing check\n if bucketHasPolicy is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{s3Arn}/s3-bucket-policy-exists-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{s3Arn}/s3-bucket-policy-exists-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.5] Amazon S3 buckets should have a bucket policy configured\",\n \"Description\": f\"Amazon S3 bucket {bucketName} does not have a bucket policy configured. A bucket policy is a resource-based policy that you can use to grant access permissions to your Amazon S3 bucket and the objects in it. Only the bucket owner can associate a policy with a bucket. The permissions attached to the bucket apply to all of the objects in the bucket that are owned by the bucket owner. These permissions do not apply to objects that are owned by other AWS accounts. S3 Object Ownership is an Amazon S3 bucket-level setting that you can use to control ownership of objects uploaded to your bucket and to disable or enable ACLs. By default, Object Ownership is set to the Bucket owner enforced setting and all ACLs are disabled. The bucket owner owns all the objects in the bucket and manages access to data exclusively using policies. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Policies and how to configure it refer to the Using bucket policies section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-policies.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"CIS Amazon Web Services Foundations Benchmark V1.5 2.1.5\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{s3Arn}/s3-bucket-policy-exists-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{s3Arn}/s3-bucket-policy-exists-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.5] Amazon S3 buckets should have a bucket policy configured\",\n \"Description\": f\"Amazon S3 bucket {bucketName} does have a bucket policy configured.\",\n 
\"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Bucket Policies and how to configure it refer to the Using bucket policies section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-policies.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"CIS Amazon Web Services Foundations Benchmark V1.5 2.1.5\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_get_bucket_conn__auth_fail():\n\n null_options = pypicloud_tools.S3Config('test', None, None, None, None)\n with pytest.raises(SystemExit) as error:\n pypicloud_tools.get_bucket_conn(null_options)\n\n assert \"~/.aws/credentials\" in error.value.args[0]", "def wait_to_secret_creation(self, secret_name, namespace):\n try:\n self.get(name=secret_name, namespace=namespace)\n return True\n except K8sNotFoundException:\n return False", "def test_check_public_block(self):\n self.patch(s3.FilterPublicBlock, \"executor_factory\", MainThreadExecutor)\n self.patch(s3, \"S3_AUGMENT_TABLE\", [])\n\n session_factory = self.replay_flight_data(\"test_s3_check_public_block\")\n p = self.load_policy(\n {\n \"name\": \"check-public-block\",\n \"resource\": \"s3\",\n \"filters\": [\n {\n \"type\": \"check-public-block\",\n }\n ],\n },\n 
session_factory=session_factory,\n )\n\n resources = {bucket[\"Name\"]: bucket for bucket in p.run()}\n self.assertEqual(len(resources), 3)\n locked_down_bucket = resources[\"my-locked-down-bucket\"]\n self.assertIn(\"GetPublicAccessBlock\", locked_down_bucket[\"c7n:DeniedMethods\"])", "def test_get_bucket_versioning_config(self):\n query_factory = mock_query_factory(payload.sample_s3_get_bucket_versioning_result)\n def check_query_args(passthrough):\n self.assertEqual(query_factory.credentials.access_key, \"foo\")\n self.assertEqual(query_factory.credentials.secret_key, \"bar\")\n self.assertEqual(\n RequestDetails(\n service=b\"s3\",\n region=REGION_US_EAST_1,\n method=b\"GET\",\n url_context=client.s3_url_context(self.endpoint, \"mybucket\", \"?versioning\"),\n content_sha256=EMPTY_CONTENT_SHA256,\n ),\n query_factory.details,\n )\n return passthrough\n\n def check_results(versioning_config):\n self.assertEquals(versioning_config.status, None)\n\n creds = AWSCredentials(\"foo\", \"bar\")\n s3 = client.S3Client(creds, query_factory=query_factory)\n d = s3.get_bucket_versioning_config(\"mybucket\")\n d.addCallback(check_query_args)\n d.addCallback(check_results)\n return d", "def aws_s3_bucket_lifecycle_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n s3 = session.client(\"s3\")\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for buckets in list_buckets(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(buckets,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n bucketName = buckets[\"Name\"]\n s3Arn = f\"arn:{awsPartition}:s3:::{bucketName}\"\n try:\n s3.get_bucket_lifecycle_configuration(Bucket=bucketName)\n lifecycleConfig = True\n except ClientError:\n lifecycleConfig = False\n \n if lifecycleConfig is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-lifecyle-configuration-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.2] Amazon S3 buckets should implement lifecycle policies for data archival and recovery operations\",\n \"Description\": f\"Amazon S3 bucket {bucketName} has a lifecycle policy configured.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Lifecycle policies and how to configure it refer to the How Do I Create a Lifecycle Policy for an S3 Bucket? 
section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 MP-6\",\n \"NIST SP 800-53 Rev. 4 PE-16\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC6.5\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.8.3.1\",\n \"ISO 27001:2013 A.8.3.2\",\n \"ISO 27001:2013 A.8.3.3\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": s3Arn + \"/s3-bucket-lifecyle-configuration-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": s3Arn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[S3.2] Amazon S3 buckets should implement lifecycle policies for data archival and recovery operations\",\n \"Description\": f\"Amazon S3 bucket {bucketName} does not have a lifecycle policy configured. S3 Lifecycle Policies can help lower data management tasks, lower storage costs, and get rid of corrupted or incomplete objects within your buckets. You can configure S3 to move objects to lower cost storage such as Infrequent Access or you can send objects to long-term storage in Amazon Glacier. If you have regulatory or industry compliance requirements to store certain types of data or logs, lifecycle policies is an automatable and auditable way to accomplish that. Likewise, if you have requirements to delete data after a certain amount of time a lifecycle policy can also accomodate that requirement. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Lifecycle policies and how to configure it refer to the How Do I Create a Lifecycle Policy for an S3 Bucket? 
section of the Amazon Simple Storage Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": global_region_generator(awsPartition),\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Storage\",\n \"AssetService\": \"Amazon S3\",\n \"AssetComponent\": \"Bucket\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsS3Bucket\",\n \"Id\": s3Arn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 MP-6\",\n \"NIST SP 800-53 Rev. 4 PE-16\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC6.5\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.8.3.1\",\n \"ISO 27001:2013 A.8.3.2\",\n \"ISO 27001:2013 A.8.3.3\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\",\n }\n yield finding", "def wait_for_key_response():\n timeout = 30.0\n while len(key_input) < 20 and timeout > 0:\n logging.debug(\"Waiting for encryption key...\")\n sleep(0.25)\n timeout -= 0.25\n if timeout == 0:\n logging.error(\n \"Error: timeout reached waiting for encryption key response.\")\n quit(2)", "def athena_workgroup_encryption_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n athena = session.client(\"athena\")\n # ISO time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n # loop work groups from cache\n for wgroup in list_work_groups(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(wgroup,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n workgroupName = wgroup[\"Name\"]\n workgroupArn = f\"arn:{awsPartition}:athena:{awsRegion}:{awsAccountId}:workgroup/{workgroupName}\"\n # get specific details from workgroup\n wginfo = athena.get_work_group(WorkGroup=workgroupName)[\"WorkGroup\"]\n # determine if there is an encryption - this dict will be missing if it is not\n try:\n encryptionOption = wginfo[\"Configuration\"][\"ResultConfiguration\"][\"EncryptionConfiguration\"][\"EncryptionOption\"]\n except KeyError:\n encryptionOption = \"NO_ENCRYPTION\"\n # map the various encryption options (NO_ENCRYPTION, SSE_S3, SSE_KMS, and CSE_KMS)\n # this is a failing check (high severity)\n if encryptionOption == \"NO_ENCRYPTION\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{workgroupArn}/athena-workgroup-query-encryption-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": workgroupArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[Athena.1] Athena workgroups should be configured to enforce query result encryption\",\n \"Description\": f\"Athena workgroup {workgroupName} does not enforce query result encryption. You set up query result encryption using the Athena console or when using JDBC or ODBC. 
Workgroups allow you to enforce the encryption of query results. Refer to the remediation instructions to remediate this behavior.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Athena query result encryption refer to the Encrypting Athena query results stored in Amazon S3 section in the Amazon Athena User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/athena/latest/ug/encrypting-query-results-stored-in-s3.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon Athena\",\n \"AssetComponent\": \"Workgroup\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAthenaWorkGroup\",\n \"Id\": workgroupArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"Name\": workgroupName,\n \"State\": wginfo[\"State\"],\n \"CreationTime\": str(wginfo[\"CreationTime\"])\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-1\",\n \"NIST SP 800-53 Rev. 4 MP-8\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"NIST SP 800-53 Rev. 4 SC-28\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n # this is a failing check (low severity)\n elif encryptionOption == \"SSE_S3\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{workgroupArn}/athena-workgroup-query-encryption-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": workgroupArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[Athena.1] Athena workgroups should be configured to enforce query result encryption\",\n \"Description\": f\"Athena workgroup {workgroupName} enforces query result encryption, however it uses an AWS-managed server side encryption key. AWS-SSE encryption uses an AWS managed key which does not have the ability to add compensating controls to such as a Key Policy which can help prevent malicious access to your data. You set up query result encryption using the Athena console or when using JDBC or ODBC. Workgroups allow you to enforce the encryption of query results. 
Refer to the remediation instructions to remediate this behavior.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Athena query result encryption refer to the Encrypting Athena query results stored in Amazon S3 section in the Amazon Athena User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/athena/latest/ug/encrypting-query-results-stored-in-s3.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon Athena\",\n \"AssetComponent\": \"Workgroup\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAthenaWorkGroup\",\n \"Id\": workgroupArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"Name\": workgroupName,\n \"State\": wginfo[\"State\"],\n \"CreationTime\": str(wginfo[\"CreationTime\"])\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-1\",\n \"NIST SP 800-53 Rev. 4 MP-8\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"NIST SP 800-53 Rev. 4 SC-28\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n # this is a passing check\n elif encryptionOption == (\"SSE_KMS\" or \"CSE_KMS\"):\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{workgroupArn}/athena-workgroup-query-encryption-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": workgroupArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[Athena.1] Athena workgroups should be configured to enforce query result encryption\",\n \"Description\": f\"Athena workgroup {workgroupName} enforces query result encryption.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Athena query result encryption refer to the Encrypting Athena query results stored in Amazon S3 section in the Amazon Athena User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/athena/latest/ug/encrypting-query-results-stored-in-s3.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon Athena\",\n \"AssetComponent\": \"Workgroup\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAthenaWorkGroup\",\n \"Id\": workgroupArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"Name\": workgroupName,\n \"State\": wginfo[\"State\"],\n \"CreationTime\": str(wginfo[\"CreationTime\"])\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-1\",\n \"NIST SP 800-53 Rev. 4 MP-8\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"NIST SP 800-53 Rev. 
4 SC-28\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n # this is a cautionary function in case encryption options are ever expanded\n else:\n print(f\"Athena workgroup {workgroupName} has an encryption option of {encryptionOption} which was not accounted for...\")\n continue", "def _verify_blob_existance(handle, bucket, key):\n try:\n handle.get(bucket=bucket, key=key)\n except BlobNotFoundError:\n return False\n return True", "def validate_s3_credentials(s3_access_key_id, s3_secret_access_key,\n s3_endpoint, s3_region, s3_bucket,\n use_ssl, s3_signature_version):\n\n s3_config_object = None\n if s3_signature_version != 'default' and s3_signature_version != '':\n s3_config_object = botocore.client.Config(\n signature_version=s3_signature_version)\n\n s3_client = boto3.client('s3',\n region_name=s3_region,\n use_ssl=use_ssl,\n aws_access_key_id=s3_access_key_id,\n aws_secret_access_key=s3_secret_access_key,\n endpoint_url=s3_endpoint,\n config=s3_config_object)\n\n s3_client.head_bucket(Bucket=s3_bucket)\n\n # Add a check to see if the current object store will support\n # our path length.\n long_key = os.path.join(\n 'tvault_config/',\n 'workload_f5190be6-7f80-4856-8c24-149cb40500c5/',\n 'snapshot_f2e5c6a7-3c21-4b7f-969c-915bb408c64f/',\n 'vm_id_e81d1ac8-b49a-4ccf-9d92-5f1ef358f1be/',\n 'vm_res_id_72477d99-c475-4a5d-90ae-2560f5f3b319_vda/',\n 'deac2b8a-dca9-4415-adc1-f3c6598204ed-segments/',\n '0000000000000000.00000000')\n s3_client.put_object(\n Bucket=s3_bucket, Key=long_key, Body='Test Data')\n\n s3_client.delete_object(Bucket=s3_bucket, Key=long_key)\n\n return {'status': 'Success'}", "def _wait_result(exp_prefix, exp_name, timeout):\n result_path = os.path.join(config.LOG_DIR, \"s3\", exp_prefix, exp_name, 'params.pkl')\n print(\"Polling for results in\",result_path) \n try:\n file_handle = polling.poll(\n lambda: open(result_path),\n ignore_exceptions=(IOError,),\n timeout=timeout,\n step=60)\n file_handle.close()\n except polling.TimeoutException:\n return False\n return True", "def validate_login_credentials(self):\r\n\r\n self.set_credentails_with_env(self.__client_config_path)\r\n try:\r\n self.__storage_client = storage.Client() # try to validate credentials if success method return true else stop here\r\n print(\"Google Client Authentication set success\")\r\n except:\r\n raise Exception('Authentication failed for Google cloud storage')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop our generator loop when we've exceeded our temperature boundary
def stopGeneratorLoop(temp, start, stop):
    if start > stop and temp < stop:
        return True
    elif start < stop and temp > stop:
        return True
    return False
[ "def stopping_condition_is_met(self) -> bool:\n return self.iter >= self.max_iter", "def stop(self, iterations):\n self.stop_count += iterations", "def _end_condition(self):\n return self._num_iters >= self._max_iter", "def wait_temperature(trigTemp):\n previous = preTemp\n while ((preTemp-trigTemp)*(previous-trigTemp)>0):\n preTemp_ref = preTemp \n time.sleep(0.5)", "def yeast_temp(self, request_number):\n tmp = self.read_temp(request_number)\n # noinspection PyRedundantParentheses\n while (tmp > 80 or tmp < 60):\n try:\n print(\"\\t\\b***Temperature of yeast is out of range.***\")\n print(\" ***Bring another yeast and measure temperature again.*** \\n\")\n time.sleep(sleep_time * 3)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Temperature of yeast is not in range.\",\n datetime.datetime.now(),\n \"fail\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Temperature of yeast is not in range.\")\n time.sleep(sleep_time)\n except Exception as e:\n print(e)\n tmp = self.read_temp(request_number)\n try:\n print(\" Temperature of yeast is in range and ready to use.\\n\")\n time.sleep(sleep_time * 2)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Temperature of yeast measured.\", datetime.datetime.now(),\n \"pass\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Temperature of yeast measured\")\n time.sleep(sleep_time * 2)\n except Exception as e:\n\n # Exception: checks if measurement has failed\n print(e)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Failed to measure temperature of yeast\",\n datetime.datetime.now(), \"fail\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Failed to measure temperature of yeast\")\n time.sleep(sleep_time)", "def monitor_tank(self, env):\n while True:\n if self.fuel_tank.level / self.fuel_tank.capacity < THRESHOLD:\n print('Calling tanker truck at %d' % env.now)\n yield env.process(tanker(env, self))\n\n yield env.timeout(10) # Check every 10 minutes", "def target_temperature_step(self):# -> Optional[float]:\r\n return 1", "def start_temperature_control_thread():\n def control_temperature():\n while True: \n try:\n actual_target_C = get_actual_target_temperature_C() \n current_temperature_C = hardware.temperature_probes.get_current_temperature()\n if _is_device_overriden() | (current_temperature_C is None) | (actual_target_C is None): raise StopControlThread # skip iteration\n # the great and (not so) efficient algorithm!\n if 
misc_utils.is_within_distance(current_temperature_C, actual_target_C, configuration.temperature_tolerance()): \n _set_heater(False); _set_freezer(False) \n elif current_temperature_C < actual_target_C:\n _set_heater(True); _set_freezer(False)\n elif current_temperature_C > actual_target_C:\n _set_heater(False); _set_freezer(True)\n except StopControlThread as e:\n # nothing, let loop re-iterate\n pass\n except Exception as e:\n print 'Error while setting temperature:\\n' + str(e) \n time.sleep(configuration.control_temperature_interval_seconds()) \n control_temperature_thread = Thread(target=control_temperature, args=())\n control_temperature_thread.daemon = True\n control_temperature_thread.start()", "def temp_cold() -> bool:\n record = db_io.latest_records(\"sensor_temp\")[0]\n return float(record[0]) < homeauto_config.TEMP_LO", "def stop(self):\n return _raw_util.raw_divide_ff_sptr_stop(self)", "def feedback_T(self):\n if self.counter_T_feedback_reset_bool == 1:\n self.counter_T_feedback = self.t[-1]\n self.counter_T_feedback_reset_bool = 0\n else:\n if (self.t[-1] - self.counter_T_feedback) > self.T_delta_t.get(): # so every self.T_delta_t seconds\n self.counter_T_feedback_reset_bool = 1\n try:\n last_piezo_voltage = np.mean(self.dat[0][-10:]) # tries to average over the last 5 temperature values if possible\n except:\n last_piezo_voltage = self.dat[0][-1] # takes last value of data array of the index corresponding to the name piezo_voltage\n if abs(last_piezo_voltage) < self.minimum_voltage: # if there's no voltage on the input channel, lockbox is probably off\n self.setOutOfLock(True)\n else:\n self.logger.debug(\"Possibly changing temperature\")\n change_temperature = 0\n # It is possible that we want to change the temperature\n if last_piezo_voltage > self.Vpi_lim_high.get():\n # voltage too high. Decrease the temperature\n # by .1 degrees\n # check when the last temperature change was\n new_set_temp = self.T_set.get() - 1\n # check that the new set temperature is not\n # outside the limits\n if new_set_temp > self.T_min.get():\n self.T_set.set(new_set_temp)\n change_temperature = 1\n else:\n self.logger.error('Temp feedback reached\\\n higher specified limit')\n if last_piezo_voltage < self.Vpi_lim_low.get():\n new_set_temp = self.T_set.get() + 1\n if new_set_temp < self.T_max.get():\n self.T_set.set(new_set_temp)\n change_temperature = 1\n else:\n self.logger.error('Temp feedback\\\n reached lower specified limit')\n\n if change_temperature == 1:\n self.logger.info(\n 'Changing baseplate temperature to %f degrees c'\n % (self.T_set.get()/10.) 
)\n\n # time.sleep(0.3)\n try:\n changedTemperature = serialChiller.setTemperature(serialPort = self.ser, temp = self.T_set.get())\n self.logger.info(\n 'CHANGED baseplate temperature at t = %d s to %d' % \n (round(self.t[-1]), new_set_temp))\n except:\n self.logger.error('Could not change set \\\ntemperature at t = %d' % self.t[-1])\n\n if changedTemperature != -1 and changedTemperature > self.T_min.get() and changedTemperature < self.T_max.get():\n # double check it's okay\n self.T_set.set(changedTemperature)\n else: # in case just the reply from the changed temperature hanged, we ask for the temperature one more time manually\n if changedTemperature != -1 and changedTemperature > self.T_min.get() and changedTemperature < self.T_max.get() :\n self.T_set.set(changedTemperature)\n print 'determined changed baseplate by manually asking, instead of the reply from serial_Chiller255p.setTemperature'", "def gas_station_control(env, fuel_pump):\n while True:\n if fuel_pump.level / fuel_pump.capacity * 100 < THRESHOLD:\n # We need to call the tank truck now!\n txt = ('Calling tank truck at %d' % env.now).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n # Wait for the tank truck to arrive and refuel the station\n yield env.process(tank_truck(env, fuel_pump))\n\n yield env.timeout(10) # Check every 10 seconds", "def tail_waggler(done):\n m = ev3.MediumMotor(); assert m.connected\n\n while not done.is_set():\n m.run_timed(speed_sp=90, time_sp=1000, stop_action='coast')\n time.sleep(1)\n ev3.Sound.play('rattle-snake.wav').wait()\n m.run_timed(speed_sp=-90, time_sp=1000, stop_action='coast')\n time.sleep(2)", "def stop():\n global _is_running\n\n if not _is_running:\n return\n _is_running = False\n for t in _generator_threads:\n t.get()\n _generator_pool.close()\n Loggers.general.info(\"Stopped transaction generator...\")", "def _step(self):\n\n spin_pos = self.lattice.get_random_spin_position()\n deltaE = self.J * self.lattice.energy_difference(spin_pos)\n \n accept = False\n\n if deltaE < 0:\n accept = True\n else:\n r = random.random()\n if r <= self._exp[deltaE]:\n accept = True\n\n if accept: ## flip spin\n self.lattice.energy += deltaE\n self.lattice.flip_spin(spin_pos)\n\n return accept", "def CheckTemperatures(self, temperatures, elapsed):\n for index, temperature_value in enumerate(temperatures):\n with self.group_checker:\n testlog.LogParam('elapsed', elapsed)\n testlog.LogParam('sensor', self.sensors[index])\n testlog.CheckNumericParam('temperature',\n temperature_value,\n min=self.args.lower_threshold[index],\n max=self.args.temperature_limit[index])\n\n self.max_temperature[index] = max(\n self.max_temperature[index], temperature_value)\n\n if not self.heated_up[index] and (\n temperature_value >= self.args.lower_threshold[index]):\n self.heated_up[index] = True\n event_log.Log('heated', temperature_value=temperature_value,\n lower_threshold=self.args.lower_threshold[index],\n sensor=self.sensors[index],\n elapsed_sec=elapsed)\n logging.info('Sensor %s heated up to %d C in %d seconds.',\n self.sensors[index],\n self.args.lower_threshold[index], elapsed)\n\n if temperature_value > self.args.temperature_limit[index]:\n event_log.Log('over_heated', temperature_value=temperature_value,\n temperature_limit=self.args.temperature_limit[index],\n sensor=self.sensors[index],\n elapsed_sec=elapsed)\n self.fail('Sensor %s temperature got over %d.' 
% (\n self.sensors[index], self.args.temperature_limit[index]))\n\n if elapsed >= self.args.heat_up_timeout_secs and (\n not self.heated_up[index]):\n event_log.Log('slow_temp_slope', temperature_value=temperature_value,\n lower_threshold=self.args.lower_threshold[index],\n sensor=self.sensors[index],\n timeout=self.args.heat_up_timeout_secs)\n logging.info('temperature track: %r', self.temperatures_track)\n self.fail(\"Temperature %s didn't go over %d in %s seconds.\" % (\n self.sensors[index],\n self.args.lower_threshold[index],\n self.args.heat_up_timeout_secs))\n\n if self.args.temperatures_difference:\n difference = max(temperatures) - min(temperatures)\n if difference > self.args.temperatures_difference:\n logging.info('temperature track: %r', self.temperatures_track)\n self.fail('The difference of temperatures %d exceeds the limit %d.' % (\n difference, self.args.temperatures_difference))", "def stop(data):\n raise StopIteration()", "def gen_after_eden_stopped(self) -> None:\n pass", "def termination(self, u, t, step_no):\n tol = 0.01 #Stop when solution is 0.01°C close to asymptotic value Ts\n return abs(u[step_no]- self.Ts) < tol" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
[ "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n use_bytes = isinstance(cmd, bytes)\n\n if path is None:\n path = os.environ.get(\"PATH\", None)\n if path is None:\n try:\n path = os.confstr(\"CS_PATH\")\n except (AttributeError, ValueError):\n # os.confstr() or CS_PATH is not available\n path = os.defpath\n # bpo-35755: Don't use os.defpath if the PATH environment variable is\n # set to an empty string\n\n # PATH='' doesn't match, whereas PATH=':' looks in the current directory\n if not path:\n return None\n\n path = os.fsdecode(path)\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.path.sep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def get_command_from_path(self, cmd):\n for path in os.environ[\"PATH\"].split(os.pathsep):\n cmd_path = os.path.join(path, cmd)\n if os.access(cmd_path, os.X_OK):\n return cmd_path\n return \"\"", "def get_command(logger, path, name, level=logging.ERROR):\n\n cmd_file = None\n if path:\n cmd_file = which(path)\n if not is_exe(cmd_file):\n log(level, \"file {} is not executable file\".\n format(path))\n return None\n else:\n cmd_file = which(name)\n if not cmd_file:\n # try to search within dirname()\n cmd_file = which(name,\n path=os.path.dirname(sys.argv[0]))\n if not cmd_file:\n log(level, \"cannot determine path to the {} command\".\n format(name))\n return None\n logger.debug(\"{} = {}\".format(name, cmd_file))\n\n return cmd_file", "def which(filename, search_path=os.environ['PATH']):\n\n file_found = False\n\n for path in search_path.split(pathsep):\n if exists(join(path, filename)):\n file_found = True\n break\n\n if file_found:\n return abspath(join(path, filename))\n else:\n return None", "def get_real_command(cmd):\n if sys.platform == 'win32':\n # we in fact expect pure module name (without extension)\n # so, lets remove extension\n if os.path.splitext(cmd)[1] == '.py':\n cmd = cmd[:-3]\n # PATHEXT is necessary to check on Windows (force lowercase)\n pathext = list(map(lambda x: x.lower(),\n os.environ['PATHEXT'].split(os.pathsep)))\n if '.py' not in pathext:\n # we assume that PATHEXT contains always '.py'\n os.environ['PATHEXT'] = '.py;' + os.environ['PATHEXT']\n full_path = shutil_which(cmd + '.py')\n if full_path:\n return 
full_path\n\n return cmd", "def find_mode_to_use(pathname, owner, mode):\n if owner in get_owner_string(pathname):\n if DEFAULT_OWNER in owner:\n mode_value = mode[DEFAULT_OWNER]\n else:\n mode_value = mode['other']\n else:\n mode_value = mode['other']\n\n # Default permissions are for a file. If pathname is a directory, then\n # make it owner and group executable as well\n if os.path.isdir(pathname):\n mode_value = mode_value | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n\n return mode_value", "def find_executable(cmd, env=None):\n cmd = add_python_cmd(cmd)\n\n def _is_executable(candidate):\n return os.path.isfile(candidate) and os.access(candidate, os.X_OK)\n\n # anti_sep is like os.path.altsep, but it's always defined\n anti_sep = '/' if os.path.sep == '\\\\' else '\\\\'\n cmd = [cmd[0].replace(anti_sep, os.path.sep)] + cmd[1:]\n\n # exts are the file extensions to try. If the command already has an extension\n # or we're not on windows, then we don't try any extensions.\n has_ext = bool(os.path.splitext(cmd[0])[1])\n exts = ('',) if sys.platform != 'win32' or has_ext else ('.exe', '.bat')\n\n def _resolve_extension(candidate):\n for ext in exts:\n resolved = candidate + ext\n if _is_executable(resolved):\n return resolved\n return None\n\n # If the command is absolute or relative to cwd, check it directly and do not\n # consult $PATH.\n if os.path.sep in cmd[0]:\n # abspath is a noop on an already-absolute path\n resolved = _resolve_extension(os.path.abspath(cmd[0]))\n if resolved:\n cmd = [resolved] + cmd[1:]\n return cmd\n\n # We have a non-absolute, non-relative executable, so walk PATH.\n paths = (os.environ if env is None else env).get('PATH', '').split(os.pathsep)\n for path in ['.'] + paths:\n if path == '':\n continue\n resolved = _resolve_extension(os.path.join(os.path.abspath(path), cmd[0]))\n if resolved:\n cmd = [resolved] + cmd[1:]\n break\n\n return cmd", "def get_path(executable, log=None):\n code, out, err = run_cmd('which {}'.format(executable))\n if code != 0 or err == '{} not found'.format(executable):\n raise PathError('{} is not in your path'.format(executable), log)\n else:\n return os.path.abspath(out)", "def find_executable(executable, path=None):\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n ext_list = ['']\n\n if os.name == 'os2':\n (base, ext) = os.path.splitext(executable)\n # executable files on OS/2 can have an arbitrary extension, but\n # .exe is automatically appended if no dot is present in the name\n if not ext:\n executable = executable + \".exe\"\n elif sys.platform == 'win32':\n path_ext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in path_ext:\n ext_list = path_ext\n print('path_ext=', path_ext, ', base=', base, ', ext=', ext,\n 'ext_list=', ext_list)\n for ext in ext_list:\n exec_name = executable + ext\n if os.path.isfile(exec_name):\n return exec_name\n else:\n for p in paths:\n f = os.path.join(p, exec_name)\n if os.path.isfile(f):\n return f\n else:\n return None", "def xopen_or_none(self, path, mode):\n if path is None:\n return None\n return self.xopen(path, mode)", "def fspath(path):\n return os.fspath(path) if hasattr(os, \"fspath\") else str(path)", "def get_command(self,\n key: keyutils.KeySequence,\n mode: str,\n default: bool = False) -> Optional[str]:\n self._validate(key, mode)\n if default:\n bindings = dict(val.bindings.default[mode])\n else:\n bindings = self.get_bindings_for(mode)\n return bindings.get(key, None)", "def 
which(executable_name, env_var='PATH'):\n exec_fp = None\n\n if env_var in os.environ:\n paths = os.environ[env_var]\n\n for path in paths.split(os.pathsep):\n curr_exec_fp = os.path.join(path, executable_name)\n\n if os.access(curr_exec_fp, os.X_OK):\n exec_fp = curr_exec_fp\n break\n\n return exec_fp", "def which(exe_str):\n paths = os.environ.get('PATH', None)\n resolved_exe = None\n\n if paths is None:\n # log warning\n msg = \"PATH env var is not defined.\"\n log.error(msg)\n return resolved_exe\n\n for path in paths.split(\":\"):\n exe_path = os.path.join(path, exe_str)\n # print exe_path\n if os.path.exists(exe_path):\n resolved_exe = exe_path\n break\n\n # log.debug(\"Resolved cmd {e} to {x}\".format(e=exe_str, x=resolved_exe))\n return resolved_exe", "def which(path, exefile):\n for p in (path or \"\").split(';'):\n next = os.path.join(p, exefile)\n if os.path.exists(next):\n return next\n\n return \"\"", "def open_file(path, mode):\n max_attempts = 100\n f = None\n for _ in range(max_attempts): # pragma: no branch\n try:\n f = open(path, mode)\n except PermissionError: # pragma: no cover\n continue\n break\n return f", "def fspath(path):\n # type: (t.Union[os.Pathlike, str, bytes]) -> t.Union[str, bytes]\n if isinstance(path, (str, bytes)):\n return path\n\n # Work from the object's type to match method resolution of other magic\n # methods.\n path_type = type(path)\n try:\n path = path_type.__fspath__(path)\n except AttributeError:\n if hasattr(path_type, \"__fspath__\"):\n raise\n else:\n if isinstance(path, (str, bytes)):\n return path\n else:\n raise TypeError(\n \"expected __fspath__() to return str or bytes, \"\n \"not \" + type(path).__name__\n )\n\n raise TypeError(\n \"expected str, bytes or os.PathLike object, not \"\n + path_type.__name__\n )", "def file_or_path(strictmodes=False, strictparams=False, **argmap):\n\n if strictmodes and strictparams:\n raise ValueError(\n 'Only one of strictmodes or strictparams can be specified.'\n )\n\n OPEN_KWDS = inspect.getfullargspec(builtins.open).args\n\n @wrapt.decorator\n def inner(wrapped, instance, args, kw):\n w_args = inspect.getcallargs(wrapped, *args, **kw)\n managed = []\n to_reopen = []\n for _name in argmap:\n _val = w_args.get(_name, None)\n if _val is None:\n continue\n if not is_file_like(_val):\n # throw here??\n managed.append((_name, _val))\n else:\n # This is file-like. 
Test modes if strictness specified\n if strictmodes:\n try:\n desired_mode = argmap[_name]['mode']\n except KeyError:\n raise ValueError('strictmodes requires a target mode.')\n try:\n actual_mode = _val.mode\n if desired_mode != actual_mode:\n to_reopen.append(\n (_name, _val, {'mode': desired_mode})\n )\n except AttributeError as e:\n raise UnmodifiableModeError(_val) from e\n elif strictparams:\n desired_params = argmap[_name].copy()\n try:\n for key in desired_params:\n if key not in OPEN_KWDS:\n raise TypeError(\n \"'{}' is not a valid keyword argument\"\n \"\".format(key)\n )\n except (TypeError, AttributeError) as e:\n raise UnmodifiableAttributeError((_val, key)) from e\n\n # Always attempt to preserve mode\n if 'mode' not in desired_params:\n try:\n mode = _val.mode\n desired_params['mode'] = mode\n except AttributeError as e:\n pass\n to_reopen.append((_name, _val, desired_params))\n\n with contextlib.ExitStack() as stack:\n for _key, _path in managed:\n _kwargs = argmap[_key]\n try:\n w_args[_key] = stack.enter_context(open(_path, **_kwargs))\n except TypeError as e:\n raise AttributeError(*e.args) from e\n for _key, _file, _kwargs in to_reopen:\n # TODO(zeroslack): handle possible OSError due to seek, tell...\n try:\n w_args[_key] = stack.enter_context(\n reopen(_file, **_kwargs)\n )\n except TypeError as e:\n raise UnmodifiableAttributeError((_val, *e.args)) from e\n return wrapped.__call__(**w_args)\n\n return inner", "def which(searchFile) :\n for searchPath in os.environ[\"PATH\"].split(os.pathsep):\n test=os.path.join(searchPath,searchFile)\n if os.path.isfile(test): return test\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all `Card`s held by this object.
def GetCards(self): return self.cards
[ "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def get_cards(self):\r\n return self.deck", "def cards(self):\n try:\n return self.game.cards[self.player_id]\n except AttributeError:\n raise ValueError('Cannot access cards: player is unassigned.')", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def getHearthstoneCards(self):\n return self.cache.get(HEARTHSTONE_CARD_API, fallback=[])", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def GetContents(self):\n return [h for h in self.cards if isinstance(h, Content)]", "def get_cards(self, token):\n cards = display(CustomerCard.get_all_cards(customer_id=token.customer_id))\n return {'cards': cards}", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def get_cards(self, expansion=None):\n print(\"getting cards\")\n session = Session()\n cards = session.query(Card).filter(\n #Card.data['is_enhanced']=='false',\n Card.data['is_maverick']=='false')\n if expansion:\n cards = cards.filter(Card.data['expansion']==str(expansion))\n cards = cards.all()\n card_expansion = {}\n for card in cards:\n key = card.name+\";\"+str(card.data['expansion'])+\";\"+card.data['rarity']\n # Prefer non-enhanced non-maverick card that is in current_set\n if key in card_expansion:\n if card.data['is_enhanced']:\n continue\n if not card.is_from_current_set:\n continue\n if card.data['is_maverick']:\n continue\n card_expansion[key] = card\n print([card.data['expansion'] for card in card_expansion.values() if card.name=='Mookling'])\n print(len(cards))\n print(len(card_expansion.values()))\n return card_expansion.values()", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def get_active_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards\",\n\t\t\t\"WHERE 
cards.synced = '1'\",\n\t\t\t\"OR cards.synced = 'True';\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def fetch_cards_of_suit(self, suit):\n\n def sort_by_value(card):\n \"\"\" Returns the value of the card based on it's value name \"\"\"\n return VALUE_BY_NAME[card.value]\n\n cards_of_suit = [card for card in self.cards if suit == card.suit]\n\n # Sort for easy viewing.\n cards_of_suit.sort(key=sort_by_value)\n return cards_of_suit", "def flashcards(self):\n\n return self.session.query(db.Flashcard)", "def show(self):\n for c in self.cards:\n print(c)", "def findCardsByNum(self, number):\n if self.verbose:\n print(self.name + \" finding all cards of number \" + str(number))\n if self.log is not None:\n self.log.write(self.name + \" finding all cards of number \" + str(number) + \"\\n\")\n result = []\n for card in self.hand:\n if card.get_number() == number:\n result.append(card)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all `Header` `Card`s.
def GetHeaders(self): return [h for h in self.cards if isinstance(h, Header)]
[ "def raw_header_cards(self):\n return []", "def raw_header_cards(self):\n return ['DISPERSE', 'TILTPOS', 'INSFILTE']", "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def GetCards(self):\n return self.cards", "def list(self):\n return list(sorted(self.header_folder.glob(\"*\")))", "def getHearthstoneCards(self):\n return self.cache.get(HEARTHSTONE_CARD_API, fallback=[])", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def get_header_chain(self) -> List[Header]:\n return list(self.chain.keys())", "def headers(self) -> List[TableData]:\n return self.get_children(appended_xpath=\"//tr/th\")", "def headers(self):\n header_list = []\n for index in range(self.size):\n kw = self[index]\n header_list.append( kw.header )\n return header_list", "def list_headers():\n return (\n 'Name',\n 'Port',\n 'Scheme',\n 'PID',\n 'Created',\n )", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def getHeaderChain(self) -> list:\n return self.__headerChain", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def find_headers(self, url):\n soup = self.create_soup(url)\n headers = soup.find_all(\"th\")\n \n #if there are no headers, return None\n if len(headers) == 0:\n return None\n \n headers_list = list()\n \n for header in headers:\n header_text = header.get_text()\n #no duplicate headers\n if header_text not in headers_list:\n headers_list.append(header.get_text())\n \n return headers_list", "def displayHand(self):\n \n handyList = []\n \n for card in self.hand:\n handyList.append(card.name())\n \n return handyList", "def get_cards(self, token):\n cards = display(CustomerCard.get_all_cards(customer_id=token.customer_id))\n return {'cards': cards}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all `Content` `Card`s.
def GetContents(self): return [h for h in self.cards if isinstance(h, Content)]
[ "def GetCards(self):\n return self.cards", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def get_cards(self, token):\n cards = display(CustomerCard.get_all_cards(customer_id=token.customer_id))\n return {'cards': cards}", "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def get_cards(self):\r\n return self.deck", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def getHearthstoneCards(self):\n return self.cache.get(HEARTHSTONE_CARD_API, fallback=[])", "def get_all():\n all_args = request.args\n cards = MTGModel.get_all_cards(all_args)\n return custom_response(cards, 200)", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def get_cards(self, expansion=None):\n print(\"getting cards\")\n session = Session()\n cards = session.query(Card).filter(\n #Card.data['is_enhanced']=='false',\n Card.data['is_maverick']=='false')\n if expansion:\n cards = cards.filter(Card.data['expansion']==str(expansion))\n cards = cards.all()\n card_expansion = {}\n for card in cards:\n key = card.name+\";\"+str(card.data['expansion'])+\";\"+card.data['rarity']\n # Prefer non-enhanced non-maverick card that is in current_set\n if key in card_expansion:\n if card.data['is_enhanced']:\n continue\n if not card.is_from_current_set:\n continue\n if card.data['is_maverick']:\n continue\n card_expansion[key] = card\n print([card.data['expansion'] for card in card_expansion.values() if card.name=='Mookling'])\n print(len(cards))\n print(len(card_expansion.values()))\n return card_expansion.values()", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def request_card_list(context):\n card_list, result = context.clients.card_service.listOperations.get_card_list(\n gameId=1337\n ).result()\n assert_that(result.status_code, 
equal_to(200))\n context.card_list = card_list", "def cards(self):\n try:\n return self.game.cards[self.player_id]\n except AttributeError:\n raise ValueError('Cannot access cards: player is unassigned.')", "def get_same_month_cards(self, card: Card) -> List[Card]:\n object_month = card.month\n cards = []\n for field_card in self.cards:\n if field_card.month == object_month:\n cards.append(field_card)\n return cards", "def show(self):\n for c in self.cards:\n print(c)", "def get_cards_for_board(board_id: int):\n checking_id = check_board_status()[1]\n return queries.get_cards(checking_id, board_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all Content cards of the `kind`.
def GetContentsByKind(self, kind): return [c for c in self.GetContents() if c.GetKind() == kind or c.GetKind(long=True) == kind]
[ "def GetContents(self):\n return [h for h in self.cards if isinstance(h, Content)]", "def children_of_kind(content, kind, **kwargs):\n content_instance = get_instance_with_pk_or_uuid(content)\n return content_instance.get_descendants(include_self=False).filter(kind=kind)", "def get_cards(self, expansion=None):\n print(\"getting cards\")\n session = Session()\n cards = session.query(Card).filter(\n #Card.data['is_enhanced']=='false',\n Card.data['is_maverick']=='false')\n if expansion:\n cards = cards.filter(Card.data['expansion']==str(expansion))\n cards = cards.all()\n card_expansion = {}\n for card in cards:\n key = card.name+\";\"+str(card.data['expansion'])+\";\"+card.data['rarity']\n # Prefer non-enhanced non-maverick card that is in current_set\n if key in card_expansion:\n if card.data['is_enhanced']:\n continue\n if not card.is_from_current_set:\n continue\n if card.data['is_maverick']:\n continue\n card_expansion[key] = card\n print([card.data['expansion'] for card in card_expansion.values() if card.name=='Mookling'])\n print(len(cards))\n print(len(card_expansion.values()))\n return card_expansion.values()", "def GetCards(self):\n return self.cards", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def get_cards(self, token):\n cards = display(CustomerCard.get_all_cards(customer_id=token.customer_id))\n return {'cards': cards}", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def get_cards(self):\r\n return self.deck", "def get_all():\n all_args = request.args\n cards = MTGModel.get_all_cards(all_args)\n return custom_response(cards, 200)", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def show(self):\n for c in self.cards:\n print(c)", "async def show_cards(self, ctx, hand):\r\n cards = \"\"\r\n for card in hand:\r\n cards += card.show()\r\n await ctx.send(cards)", "async def mc_list(self, ctx):\n cogs = await self.config.cogs()\n if not cogs:\n await ctx.send(\"There are no registered cogs.\")\n return\n for page in pagify(\", \".join(map(str, cogs))):\n await ctx.send(box(page))", "def fetch_cards_of_suit(self, suit):\n\n def sort_by_value(card):\n \"\"\" Returns the value of the card based on it's value name \"\"\"\n return VALUE_BY_NAME[card.value]\n\n cards_of_suit = 
[card for card in self.cards if suit == card.suit]\n\n # Sort for easy viewing.\n cards_of_suit.sort(key=sort_by_value)\n return cards_of_suit", "def getHearthstoneCards(self):\n return self.cache.get(HEARTHSTONE_CARD_API, fallback=[])", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the nearest `Card` to `card` in the direction `direc`.
def GetNextCard(self, card, direc):
    # depending on the direction we compare a different side
    # of the cards, as well as get the points whose distance
    # we're going to calculate in a different way
    if direc == Deck.LEFT:
        side = lambda x: x.right
        getp1 = lambda x: x.GetTopLeft()
        getp2 = lambda x: x.GetBottomLeft()
    elif direc == Deck.RIGHT:
        side = lambda x: x.left
        getp1 = lambda x: x.GetTopLeft()
        getp2 = lambda x: x.GetTopRight()
    elif direc == Deck.UP:
        side = lambda x: x.bottom
        getp1 = lambda x: x.GetTopLeft()
        getp2 = lambda x: x.GetBottomLeft()
    elif direc == Deck.DOWN:
        side = lambda x: x.top
        getp1 = lambda x: x.GetBottomLeft()
        getp2 = lambda x: x.GetTopLeft()

    # get those cards whose "side" is in the desired position with respect to card
    rect = card.GetRect()
    nxt = []
    if direc == Deck.LEFT or direc == Deck.UP:
        nxt = [c for c in self.GetCards() if side(c.GetRect()) < side(rect)]
    elif direc == Deck.RIGHT or direc == Deck.DOWN:
        nxt = [c for c in self.GetCards() if side(c.GetRect()) > side(rect)]
    else:
        return None

    # we're going to use getp1 to get a point in card and compare
    # it to the point got by getp2 on all the cards in nxt
    if nxt:
        # order them by distance
        nxt.sort(key=lambda x: utilities.dist2(getp1(x.GetRect()), getp2(rect)))
        # and return the nearest one
        return nxt[0]
    else:
        return None
[ "def find_closest(catalogue, ra, dec):\n output = [float('inf'), float('inf')]\n for row in catalogue:\n dist = angular_dist(row[1], row[2], ra, dec)\n if dist < output[1]:\n output = [row[0], dist]\n\n return output[0], output[1]", "def nearest_point(self, relative: tuple):\r\n dist1 = math.sqrt((relative[0] - self.cstart[0])**2 + (relative[1] - self.cstart[1])**2)\r\n dist2 = math.sqrt((relative[0] - self.cdest[0])**2 + (relative[1] - self.cdest[1])**2)\r\n if dist1 < dist2:\r\n return self.cstart\r\n else:\r\n return self.cdest", "def get_closest(self, point):\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n i = np.where(distance == distance.min())\n return i[0][0]", "def nearest_star_dumb(self, ra, dec):\n if not self._have_sources():\n logging.error(\"No sources loaded. Load data and try again.\")\n\n # Working coordinate arrays:\n sra = self._srcdata[self._ra_key].values\n sde = self._srcdata[self._de_key].values\n sep_deg = angle.dAngSep(ra, dec, sra, sde)\n origidx = np.argmin(sep_deg) # best match index in subset\n match = self._srcdata.iloc[[origidx]].copy()\n match['dist'] = sep_deg[origidx]\n return match", "def dist_and_dir_to_closest_monster(self):\n # Find all the monsters in the world\n location, count = self.locate_monsters()\n smallest_dist = self.world.height() * self.world.width()\n direction = -1\n for monster_loc in location:\n dist_to_mon = self.layer_dist(self.x, self.y, monster_loc[0], monster_loc[1])\n if dist_to_mon < smallest_dist:\n smallest_dist = dist_to_mon\n direction = self.dir_between_cells(self.x, self.y, monster_loc[0], monster_loc[1])\n if smallest_dist > 4:\n smallest_dist = 4 # If the monster is too far away, consider the distance as the character's max vision\n return smallest_dist, direction", "def get_closest(data_dir, id, scan_type, ref_date = datetime.now()):\n if not data_dir:\n data_dir = os.getcwd()\n scans = get_scans(data_dir, id, scan_type)\n closest = find_closest(scans, ref_date)\n return closest", "def relative_pos_closest(string, pos, char):\n left = string[:pos]\n right = string[pos:]\n try:\n left_dist = pos - left.rindex(char)\n except ValueError:\n left_dist = None\n try:\n right_dist = right.index(char)\n except ValueError:\n right_dist = None\n if left_dist is None:\n return right_dist\n elif right_dist is None:\n return -left_dist\n elif left_dist >= right_dist:\n return right_dist\n else:\n return -left_dist", "def get_point_at_distance(self, point, l, reverse=False):\n # Get closest point\n closest_i = self.get_closest(point)\n\n # Get distances\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n\n # Return closest if out of radius\n if distance.min() > l**2:\n return closest_i\n\n # Get points closest to desired distances\n tol = 4\n i_lst = np.where(abs(distance - l**2) < (tol*self.dp)**2)\n\n # Loop until there are only 2 points close to distance l\n loop = 0\n while len(i_lst[0]) != 2:\n #print(i_lst)\n if loop > 50:\n return closest_i\n i_lst = np.where(abs(distance - l ** 2) < (tol * self.dp) ** 2)\n if len(i_lst[0]) < 2:\n tol += 0.03\n else:\n tol -= 0.03\n loop += 1\n i1 = i_lst[0][0]\n i2 = i_lst[0][1]\n\n # Get the right point\n path_len = len(self.dpath)\n if i2 > i1:\n di = i2 - i1\n if di < path_len-di:\n if reverse:\n return i1\n return i2\n else:\n if reverse:\n return i2\n return i1\n else:\n di = i1 - i2\n if di < path_len - di:\n if reverse:\n return i2\n return i1\n else:\n if reverse:\n return i1\n return i2", "def 
closest_dirt(self):\r\n position = self.bot_pos\r\n dirts = self.get_dirts(position[0],position[1])\r\n if dirts:\r\n i, j = min(dirts,\r\n key=lambda dirt_pos:((position[0]-dirt_pos[0])**2+(position[1]-dirt_pos[1])**2)**0.5\r\n )\r\n return (i,j)", "def _get_closest_point_in_point_cloud(self, pixel):\n # Select only points that are in front.\n fwd_points = self.points[np.where(self.points[:, 2] > 0.0)]\n # Select x and y.\n pc_xy = fwd_points[:, 0:2]\n # Select z\n pc_z = fwd_points[:, 2]\n # Divize x, y by z\n normalized_pc = pc_xy / pc_z[:, None]\n xy = np.array([pixel.x, pixel.y]).transpose()\n # Compute distance\n dist = np.sum((normalized_pc - xy)**2, axis=1)\n # Select index of the closest point.\n closest_index = np.argmin(dist)\n # Return the closest point.\n return Location(fwd_points[closest_index][0],\n fwd_points[closest_index][1],\n fwd_points[closest_index][2])", "def getDistOnLoop(st, ed, circleSize, direction):\n if direction == 'C':\n if st > ed:\n return ed + circleSize - st\n else:\n return ed - st\n else:\n if st < ed:\n return st + circleSize - ed\n else: # st >= ed 正常情况\n return st - ed", "def play_card(self, rnd: PlayerRound) -> int:\r\n #Create Simulation stuff\r\n #simRound = get_round_from_player_round(rnd, rnd.hands)\r\n bestcard = self.montecarlotreesearch(rnd)\r\n\r\n return bestcard", "def closest_dropoff(ship, game):\n\n dropoffs = get_position_dropoff(game)\n\n closest_dropoff = [1000, None]\n\n for dropoff in dropoffs:\n\n distance = game.game_map.calculate_distance(ship.position, dropoff.position)\n\n if distance < closest_dropoff[0]:\n\n closest_dropoff = dropoff\n\n return closest_dropoff", "def _optimalDestination(self):\n destX,destY = self.path.pop(0)\n destX=destX%self.worldSize[0]\n destY=destY%self.worldSize[1]\n\n return specialMath.findClosest(self.realCenter, (destX, destY), self.worldSize)", "def get_closest(point, allpoints):\n best_index = None\n best_distance = 999999999\n is_dupe = False\n\n for index, p in enumerate(allpoints):\n # if p == point:\n # continue\n dist = getdist(point, p)\n if dist <= best_distance:\n if dist == best_distance:\n is_dupe = True\n else:\n is_dupe = False\n best_distance = dist\n best_index = index\n\n if is_dupe:\n return None\n\n return best_index", "def getCard(card_id=''):\n\tcard = None\n\tq = models.Card.query(models.Card.id == card_id.upper())\n\tif q and q.count > 0:\n\t\tcard = q.get()\n\treturn card", "def findPathToNearestTile(self,unit,tile,distance=1): \n # check which is the shortest path (4 possibilities)\n startTile=unit.getPos()\n x,y=tile \n \n map=self.player.game.getMap()\n wmap=map.getWalkMap(unit)\n \n astar = AStar.AStar(AStar.SQ_MapHandler(wmap,map.iMapWidth,map.iMapHeight))\n start = AStar.SQ_Location(startTile[0],startTile[1])\n \n spath=False\n \n tiles={0:(tile,),\n 1:((x+1,y),(x-1,y),(x,y+1),(x,y-1))}\n \n posToTest=tiles[distance]\n for ntile in posToTest:\n if not unit.game.getMap().getUnit(ntile):\n try:\n end = AStar.SQ_Location(ntile[0],ntile[1]);path=[]\n for n in astar.findPath(start,end).nodes:\n path.append((n.location.x,n.location.y));\n if spath:\n if path.__len__() < spath.__len__():\n spath=path\n else:\n spath=path\n except:\n pass\n \n if spath: \n debug(\"%s walking to %s\"%(unit.name,str(spath[-1])))\n return spath", "def ScrollToCard(self, card):\n rect = card.GetRect()\n pt = rect.GetBottomRight()\n pt = self.CalcUnscrolledPosition(pt)\n self.ScrollToPoint(pt)\n\n # call rect again since we may have scrolled the window\n rect = card.GetRect()\n pt = 
rect.GetTopLeft() \n pt = self.CalcUnscrolledPosition(pt)\n self.ScrollToPoint(pt)", "def next_card(_, card_id):\n concepts = get_concepts_applying_filters().order_by('pk')\n if concepts.filter(id__gt=card_id):\n card = concepts.filter(id__gt=card_id)[0]\n else:\n card = concepts[0]\n return redirect('view_card', card.id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `self.CARD_PADDING`, fixed for scale.
def GetPadding(self): return self.CARD_PADDING * self.scale
[ "def _get_padding(self):\n return self.pt, self.pr, self.pb, self.pl", "def padding_width(self):\r\n return self.width + self.padding_left + self.padding_right", "def padding_height(self):\r\n return self.height + self.padding_top + self.padding_bottom", "def _get_padding(self):\n padding_l = self._nfft // 2\n padding_r = self._nfft // 2\n return padding_l, padding_r", "def GetPaddingLength(cls, orig_length):\n return (cls.BASE32_BIT_WIDTH - orig_length) % cls.BASE32_BIT_WIDTH", "def pad(self):\n rv = self._pad()\n if rv is not None:\n return rv\n raise AttributeError('The pad went away')", "def padding_index(self):\n return self.stoi.get(self.padding_symbol, -1)", "def get_padding_lengths(self):\n lengths = {}\n for field_name, field in self.fields.items():\n lengths[field_name] = field.get_padding_lengths()\n return lengths", "def padding(amount):\n output = \"\"\n\n for i in range(amount):\n output += \" \"\n\n return output", "def _get_padding_lengths(self) -> Dict[str, int]:\n return self.tokenizer.get_padding_lengths(self.num_sentence_words, self.num_word_characters)", "def padding(value=DEFAULT_PADDING):\n return {f\"padding-{x}\": f\"{value}px\" for x in [\"right\", \"left\", \"top\", \"bottom\"]}", "def padded_count(self):\n c = 0\n for pkt in self.packets:\n if pkt.type[\"padded\"]:\n c += 1\n return c", "def pad(self):\n idx = self.add_symbol(self.pad_word)\n return idx", "def border_height(self):\r\n return self.padding_height() + self.border_top_width + \\\r\n self.border_bottom_width", "def border_width(self):\r\n return self.padding_width() + self.border_left_width + \\\r\n self.border_right_width", "def _compute_padding(kernel_size, dilation, causal):\n\n if causal:\n return (kernel_size - 1) * dilation\n return ((kernel_size - 1) // 2) * dilation", "def __padding(self, s):\n padding_length = self.block_size - len(s) % self.block_size\n return s + padding_length * chr(padding_length)", "def padding_box_x(self):\r\n return self.position_x + self.margin_left + self.border_left_width", "def packet_size(self):\n packet_size = 1\n if self._board_type == 122:\n packet_size = 64\n elif self._board_type in [130, 161, 240]:\n packet_size = 31\n\n return packet_size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new `Card` of type `subclass` at `pos`.
def NewCard(self, subclass, pos=wx.DefaultPosition, scroll=False):
    # never use labels, always let Deck set its own
    label = len(self.cards)

    # create the new card with the unscaled position
    # so that we can just call new.Stretch() afterward
    # to set both position and size
    pos = [i / self.scale for i in pos]
    if subclass == "Content":
        new = card.Content(self, label, pos=pos)
    elif subclass == "Header":
        new = card.Header(self, label, pos=pos)
    elif subclass == "Image":
        new = card.Image(self, label, pos=pos)
    new.Stretch(self.scale)

    # set bindings for every card
    new.Bind(wx.EVT_LEFT_DOWN, self.OnCardLeftDown)
    new.Bind(wx.EVT_CHILD_FOCUS, self.OnCardChildFocus)
    new.Bind(card.Card.EVT_DELETE, self.OnCardDelete)
    new.Bind(card.Card.EVT_COLLAPSE, self.OnCardCollapse)
    new.Bind(card.Card.EVT_REQUEST_VIEW, self.OnCardRequest)
    for ch in new.GetChildren():
        ch.Bind(wx.EVT_LEFT_DOWN, self.OnCardChildLeftDown)

    # raise the appropriate event
    event = self.NewCardEvent(id=wx.ID_ANY, subclass=subclass)
    event.SetEventObject(new)
    self.GetEventHandler().ProcessEvent(event)

    # make enough space and breathing room for the new card
    self.FitToChildren()
    self.ExpandVirtualSize(self.GetPadding() * 2, self.GetPadding() * 2)

    # make sure the new card is visible
    if scroll:
        rect = new.GetRect()
        deck = self.GetRect()
        if rect.bottom > deck.bottom or rect.right > deck.right or rect.left < 0 or rect.top < 0:
            self.ScrollToCard(new)

    # finish up
    new.SetFocus()
    self.cards.append(new)
    return new
[ "def __init__(self, pos, card=None):\n self.pos = pos\n self.card = card", "def test_make_surface__subclassed_surface(self):\n expected_size = (3, 5)\n expected_flags = 0\n expected_depth = 32\n original_surface = SurfaceSubclass(\n expected_size, expected_flags, expected_depth\n )\n pixelarray = pygame.PixelArray(original_surface)\n\n surface = pixelarray.make_surface()\n\n self.assertIsNot(surface, original_surface)\n self.assertIsInstance(surface, pygame.Surface)\n self.assertNotIsInstance(surface, SurfaceSubclass)\n self.assertEqual(surface.get_size(), expected_size)\n self.assertEqual(surface.get_flags(), expected_flags)\n self.assertEqual(surface.get_bitsize(), expected_depth)", "def payload_for_create(cls, nickname, document_id, card_type, **kwargs):\n payload = super(SubcardUsNode, cls).payload_for_create('SUBCARD-US',\n nickname=nickname,\n document_id=document_id,\n card_type=card_type,\n **kwargs)\n return payload", "def test_2_5_subclass(self):\n\n self.assertTrue(issubclass(Rectangle, Base))", "def card(rank, suit):\n return card_game.Card(rank, suit)", "def __init__(self, color, pos):\n\n super().__init__(color, pos)\n self._role = \"CH\"", "def __create(self, opcode: int) -> packet_type:\r\n\r\n tftpassert(opcode in self._classes, f\"Unsupported opcode: {opcode}\")\r\n packet = self._classes[opcode]()\r\n\r\n return packet", "def __init__(self, color, pos):\n\n super().__init__(color, pos)\n self._role = \"HO\"", "def __init__(self, position: list):\r\n self.offset = 64\r\n super().__init__([position[0] - self.offset/2, position[1] - self.offset/2])\r\n self.image = pg.image.load('images/ball.png')\r\n self.mask = pg.mask.from_surface(self.image)\r\n self.max_speed = 20\r\n self.mass = 1\r\n self.drag = .002", "def construct_tower(tower_type, pos, level):\n return Tower(tower_type, pos, level)", "def __new__(cls, *args, **kwargs):\n if len(cls.__subclasses__()) > 0:\n try:\n tag_group = kwargs['tg']\n custom_type = kwargs['custom_type']\n except KeyError:\n return super(VerseTag, cls).__new__(cls)\n else:\n sub_cls = VerseTag\n node_custom_type = tag_group.node.custom_type\n tg_custom_type = tag_group.custom_type\n try:\n sub_cls = cls._subclasses[(node_custom_type, tg_custom_type, custom_type)]\n except KeyError:\n # When instance of this class has never been created, then try\n # to find corresponding subclass.\n sub_cls = find_tag_subclass(cls, node_custom_type, tg_custom_type, custom_type)\n return super(VerseTag, sub_cls).__new__(sub_cls)\n else:\n return super(VerseTag, cls).__new__(cls)", "def __init__(self, color, pos):\n\n super().__init__(color, pos)\n self._role = \"CN\"", "def __init__(self, board: List[List[Tile]], position: Tuple[int, int]):\n super().__init__(board, position)\n self._tile_type = \"bomb\"", "def __init__(self, color, pos):\n super().__init__(color, pos)\n self._role = \"SO\"", "def __init__(self, index): \r\n if isinstance(index, int):\r\n self.rank = index % 13\r\n self.suit = index // 13\r\n else:\r\n print (\"ERROR: The card constructor must be called with an int\")\r\n raise TypeError", "def __init__(self, type, x, y, width, height):\r\n super(TypedRect, self).__init__(x, y, width, height)\r\n self.type = type", "def add_piece(self, piece_id, position):\n pieces = self.get_pieces()\n board = self.get_board()\n\n if piece_id[1:3] == 'ca':\n pieces[piece_id] = Cannon(piece_id, position, board)\n elif piece_id[1:3] == 'ch':\n pieces[piece_id] = Chariot(piece_id, position, board)\n elif piece_id[1:3] == 'el':\n pieces[piece_id] = 
Elephant(piece_id, position, board)\n elif piece_id[1:3] == 'gu':\n pieces[piece_id] = Guard(piece_id, position, board)\n elif piece_id[1:3] == 'ho':\n pieces[piece_id] = Horse(piece_id, position, board)\n elif piece_id[1:3] == 'so':\n pieces[piece_id] = Soldier(piece_id, position, board)", "def create_sprite(self):\n rgb = (84, 170, 232)\n height = 15\n length = 15\n self.sprite = BaseStationSprite(rgb)", "def random(position=[0,0],size=[10,10]):\n paint=Paint(position,size)\n return paint" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select the specified `Card`.
def SelectCard(self, card, new_sel=False): self.selec.SelectCard(card, new_sel)
[ "def UnselectCard(self, card):\n self.selec.UnselectCard(card)", "def SelectNext(self, direc, new_sel=False):\n nxt = self.GetParent().GetNextCard(self.last, direc)\n if nxt:\n self.SelectCard(nxt, new_sel)", "def UnselectCard(self, card):\n if card in self.cards:\n self.cards.remove(card)\n card.Unselect()", "def pick_card(self):\n shuffle(Constants.RANKS)\n shuffle(Constants.SUITE)\n return Card(Constants.RANKS[0], Constants.SUITE[0])", "def MoveSelected(self, dx, dy):\n for c in self.GetSelection():\n self.GetParent().MoveCard(c, dx, dy)", "def GetSelection(self):\n return self.cards", "def ace_restriction_select():\n x, y = properties.SCREEN_WIDTH / 2, properties.SCREEN_HEIGHT / 2\n width, height = SUITS[0][1].width, SUITS[0][1].height\n SUITS[0][1].center = (x - width / 2, y - height / 2)\n SUITS[1][1].center = (x + width / 2, y - height / 2)\n SUITS[2][1].center = (x - width / 2, y + height / 2)\n SUITS[3][1].center = (x + width / 2, y + height / 2)\n\n for index, card_suit in enumerate(makao.CardSuit):\n button(None, SUITS[0][1].center[0] - width / 2, SUITS[0][1].center[1] - height / 1.45,\n 2 * width, height / 5, properties.FRAME_COLOR, properties.FRAME_COLOR)\n\n button('Choose suit', SUITS[0][1].center[0] - width / 2 + 5,\n SUITS[0][1].center[1] - height / 1.45 + 5, 2 * width - 10,\n height / 5 - 5, properties.TABLE_CAPTION_COLOR, properties.TABLE_CAPTION_COLOR)\n\n button(None, SUITS[index][1].center[0] - width / 2, SUITS[index][1].center[1] - height / 2,\n width, height, properties.FRAME_COLOR, properties.FRAME_COLOR)\n\n button(None, SUITS[index][1].center[0] - width / 2 + 5,\n SUITS[index][1].center[1] - height / 2 + 5, width - 10, height - 10,\n properties.BUTTON_COLOR, properties.OVER_BUTTON_COLOR)\n\n SCREEN.blit(SUITS[index][0], SUITS[index][1])\n if SUITS[index][1].collidepoint(pygame.mouse.get_pos()):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n return card_suit\n\n pygame.display.update()", "def setCardMode( self ):\n self.setCurrentMode(XOrbBrowserWidget.Mode.Card)", "def pick_card(self, putdown_pile):#COMMENTS\n pile_card = putdown_pile.top()\n i = 0\n while i <= self.deck.get_amount()+1:\n card = self.deck.top()\n match = pile_card.matches(card)\n if match:\n return self.deck.pick()[0]\n else:\n self.deck.shuffle()\n i+=1\n return None", "def select(self, hero: int, lane: int):\n self.fac[DraftAction.SelectHero] = hero\n self.fac[DraftAction.Lane] = lane", "def on_card_clicked(self, card_id, card_object, card_state):\n\n\t\t# First possiblity: the card is blocked. Then we reverse selection\n\t\tif card_object.mode == 'blocked':\n\t\t\tif card_object.state == 'normal': card_object.state = 'down'\n\t\t\telif card_object.state == 'down': card_object.state = 'normal'\n\n\t\t# Second possibility: the card is not blocked. 
Then we must decide what to do with it.\n\t\telse:\n\t\t\t# What will happen depens on the card type and the current action, so we discover them.\n\t\t\t# We also prepare to recalculate zones, which might come to be a necessity.\n\t\t\tclicked_card_type = card_object.get_type()\n\t\t\tcurrent_action = g.save.match['action']\n\t\t\trecalculate_zones = True\n\t\n\t\t\t# In the Strategy Stage, all cards are clickable, but only \n\t\t\t# one formation and one tactic can be active at any point.\n\t\t\n\t\t\t# First, let's deal with what happened when a card was seleted, rather than diselected.\t\n\t\t\tif card_state == 'normal':\n\n\t\t\t\t# If a formation was selected, we must prevent two formations from being equiped at the same time.\n\t\t\t\tif clicked_card_type == 'formation':\n\t\t\t\t\tselected_formations = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'formation')\n\t\t\t\t\tif len(selected_formations)>1:\n\t\t\t\t\t\t# Multiple formations are selected. \n\t\t\t\t\t\t# The old one must be unselected so the new one becomes the only.\n\t\t\t\t\t\tfor card in selected_formations:\n\t\t\t\t\t\t\tif card != card_object:\n\t\t\t\t\t\t\t\tcard.state = 'down'\n\t\t\t\t\t\t# This will make sure there are not more skills equiped than allowed.\n\t\t\t\t\t\tself.reevaluate_skills()\n\t\t\t\t\t\t\t\t#recalculate_zones = False\n\t\n\t\t\t\t# If a tactic was selected, we must prevent two tactics from being equiped at the same time.\n\t\t\t\telif clicked_card_type == 'tactic':\n\t\t\t\t\tselected_tactics = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'tactic')\n\t\t\t\t\tif len(selected_tactics)>1:\n\t\t\t\t\t\t# Multiple tactics selected. \n\t\t\t\t\t\t# The old one must be unselected so the new one becomes the only.\n\t\t\t\t\t\tfor card in selected_tactics:\n\t\t\t\t\t\t\tif card != card_object:\n\t\t\t\t\t\t\t\tcard.state = 'down'\n\t\t\t\t\t\t#self.reevaluate_skills()\n\t\n\t\t\t# If a skill was selected, we must prevent that there are more equiped skills than what is allowed\n\t\t\t# by the selected formation.\n\t\t\t\tif clicked_card_type in ['attacker','defender','midfielder']:\n\t\t\t\t\tlist_of_formations = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'formation')\n\t\t\t\t\tif list_of_formations == []:\n\t\t\t\t\t\t#There are no equiped formation and, thus, the player cannot equip skills yet.\n\t\t\t\t\t\tcard_object.state = 'down'\n\t\t\t\t\t\trecalculate_zones = False\n\t\t\t\t\t\tprint \"In the strategy stage, you may not equip skills without a formation. 
Wait for a preparation phase.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\t# There is an equiped formation and, thus, he can equip some skills\n\t\t\t\t\t\tformation = list_of_formations[0]\n\t\t\t\t\t\tskill_limit = None\n\t\t\t\t\t\t# This wil allow us to target the relevant zone to the selected skill.\t\t\n\t\t\t\t\t\tzone_index = 0 # The selected card was a defender\n\t\t\t\t\t\tif clicked_card_type == 'midfielder': zone_index =1 # The selected card was a midfielder\n\t\t\t\t\t\telif clicked_card_type == 'attaker': zone_index = 2 # The selected card was an attacker\n\t\t\t\t\t\t# The skill limit tells us how many skills slots are allowed by the formation.\n\t\t\t\t\t\tskill_limit = g.formations[formation.name]['men'][zone_index]\n\t\t\t\t\t\t# Now we check if the limit was exceeded.\n\t\t\t\t\t\tif len(self.card_hand.get_cards_by_type(card_state = 'selected', card_type = clicked_card_type)) > skill_limit:\n\t\t\t\t\t\t\t# If so, we reverse selection.\n\t\t\t\t\t\t\tcard_object.state = 'down'\n\t\t\t\t\t\t\trecalculate_zones = False\n\t\t\n\t\t\t\t\t\n\t\t\t# Now let's deal with what happens when a card is diselected, rather than selected.\n\t\t\telif card_state == 'down':\n\t\t\t\t# It depends on the card type.\n\t\t\t\tif clicked_card_type == 'formation':\n\t\t\t\t\t# If a formation is removed, all equiped skills are unequiped, since one cannot have\n\t\t\t\t\t# skills without a formation in the strategy stage.\n\t\t\t\t\tequiped_skills = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'skill')\n\t\t\t\t\tfor card in equiped_skills:\n\t\t\t\t\t\tcard.state = 'down'\n\t\t\n\t\t\t# If there was a change that affects the zone strength, then we must recalculate it.\n\t\t\tif recalculate_zones:\n\t\t\t\tself.calculate_zone_strength()", "def get_card(self, idx):\n return self.cards[idx]", "def give_card(hand: list, cards: dict):\n new_card = choice(list(cards))\n print(f\"The new card is {new_card}.\")\n hand.append(new_card)", "def get_card(card_id):\n\n query = \"\"\"\n select ID, Name, ImgData, Attributes\n from MonsterCards.Cards\n where ID = %s;\n \"\"\"\n\n card = execute(query, (card_id, ))[0]\n return card", "def click_card(self):\n time.sleep(3)\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, self.CSS_CASHONDELIVERY1)))\n result = self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).is_displayed()\n if result == True:\n self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).click()\n time.sleep(4)\n self.driver.find_element_by_css_selector(self.CSS_CVV).click()\n self.driver.find_element_by_css_selector('.cvv-con > ion-input >input.text-input').send_keys('111')\n time.sleep(2)\n \n else:\n print()\n except:\n print()\n time.sleep(3)", "def select_obj(self):\n self.log.info(__name__ + ': ' + 'def ' + self.select_obj.__name__ + '(): ' + self.select_obj.__doc__)\n\n self.obj = self.get_obj(self._x, self._y)\n if self.obj is not None:\n if hasattr(self.obj, 'fleet') and (self.obj.fleet != 0):\n self.select_fleet(self.obj.fleet)\n return\n self.light_zone(self.obj.x, self.obj.y)\n self.light = True\n self.speech.speak(self.phrases['select'] + self.obj.name, True)\n else:\n self.speech.speak(self.phrases['select_none'], True)", "def pick_small_card(self, card_list):\r\n small_card = card_list[0]\r\n small_card_rank_num = small_card.get_rank_num()\r\n for c in card_list:\r\n if c.get_rank_num() < small_card_rank_num:\r\n small_card = c\r\n small_card_rank_num = c.get_rank_num(c)\r\n \r\n return 
small_card", "def deal_card():\n new_card = random.choice(cards)\n return new_card", "def SelectGroup(self, group, new_sel=True):\n # in case we are coming from a card that's inside the group,\n # we may want to return to that card after selection ends\n # so we select the group but restore the last card after\n if self.last and self.last in group.GetMembers():\n crd = self.last\n\n if new_sel: self.UnselectAll()\n for c in group.GetMembers(): self.SelectCard(c)\n\n if crd:\n self.last = crd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }