Dataset columns:
query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: sequence, lengths 19 to 20
metadata: dict
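Each row below repeats the same pattern: a natural-language query, the matching code snippet in document, a list of 19-20 non-matching snippets in negatives, and a metadata dict describing the training objective. As a minimal sketch only, assuming the rows are available locally as JSON Lines (the file name and layout here are illustrative, not something this card specifies), a row with this schema could be read like this:

```python
import json

# Hypothetical local export of the rows shown below, one JSON object per line
# with the columns: query, document, negatives, metadata.
with open("code_search_triplets.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        anchor = row["query"]          # natural-language description of the code
        positive = row["document"]     # the snippet the query actually describes
        negatives = row["negatives"]   # 19-20 near-miss snippets for the same query
        objective = row["metadata"]["objective"]  # which columns form the training triplet
```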
Execute the sequence of SQL statements in {sql} as a single command
def execute(self, *sql):
    # assemble the command and pass it on to the connection
    return self.postgres.execute(self.connection, "\n".join(sql))
[ "def execute_sql_commands(sql, cur):\n for line in sql.split(\";\"):\n line = line.strip()\n line = line.replace(\"\\n\",\" \")\n if line == \"\":\n continue\n # lg.info(\"sql:: ::line %s\"%line)\n cur.execute(line)", "def execute(self, *sql):\n # assemble the SQL statements\n statements = '\\n'.join(sql)\n # place them in my stream\n print(statements, file=self.stream)\n # all done\n return", "def batchexec(self, sqls) -> None:\n self.connect(lambda cursor: [cursor.execute(s) for s in sqls], True)", "def _execute(self, sql):\n try:\n cursor = self.db.cursor()\n cursor.execute(sql)\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def execute_sql_cmds(cursor, cmds, args):\n\tfor cmd in cmds:\n\t\tcursor.execute(cmd, args)\n\t\tif len(args) == 3:\n\t\t\tprint(\"{} rows updated on {} table for {}\".format(cursor.rowcount, str.split(cmd)[1], args[2]))\n\t\telse:\n\t\t\tprint(\"{} rows updated on {} table\".format(cursor.rowcount, str.split(cmd)[1]))", "def run_sql(self, sql):\n cursor = self.db_handle_.cursor()\n cursor.execute(sql)\n return cursor.fetchall()", "def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command", "def execute_sql_files(connection, sql_files):\n for filename in sql_files:\n statement = resource_text(filename)\n for sub_statement in statement.split(\";\"):\n if sub_statement.strip():\n connection.execute(text(sub_statement))", "def run_sql(ctx, sql_code):\n mysql_args = '-e \"{}\" 2>&1'.format(sql_code)\n run_mysql_command(ctx, mysql_args, \"\")", "def execute_many_sql(sql, seq_of_params):\n con = get_db_connection()\n cur = con.executemany(sql, seq_of_params)\n results = cur.fetchall()\n column_names = [description[0] for description in cur.description] if cur.description is not None else None\n close_db_connection(con)\n return DB_Query(column_names, results)", "def batched_query(self, sql):\r\n\r\n result_sets = []\r\n messages = \"\"\r\n query = []\r\n last_query=\"\"\r\n\r\n batches = re.split(\"^\\s*(GO(?:\\s+[0-9]+)?)\\s*(?:--.*)?$\",sql,flags=re.M|re.I)\r\n # print(batches)\r\n for b in batches:\r\n if b.upper() == \"GO\":\r\n # execute one\r\n query.append(last_query)\r\n continue\r\n else:\r\n match = re.match(\"^GO\\s+([0-9]+)$\",b,re.I)\r\n if match is not None:\r\n #execute many\r\n for i in range(0,int(match.group(1))):\r\n query.append(last_query)\r\n else:\r\n # not a Go statment\r\n last_query = b\r\n query.append(last_query)\r\n\r\n # print(query)\r\n for q in query:\r\n r = self.query(q)\r\n if r is not None:\r\n result_sets.extend(r)\r\n messages += self.messages\r\n\r\n self.messages = messages\r\n return result_sets", "def execute_query_sequence(db_cursor, all_queries):\n\n for query in all_queries:\n db_cursor.execute(query)", "def run_sql_file(self, sqlfile):\n try:\n queries = self.get_queries_from(sqlfile)\n queries_executed = 0\n for query in queries:\n if self._execute_query(query, values=None): # execute each query\n queries_executed += 1\n print(\"{} Executed queries from {}\".format(queries_executed, sqlfile))\n except pymysql.InternalError as error:\n print(error.args[1])", "def run_sql_script(connection_string, sql_script):\r\n with connect(connection_string) as conn:\r\n cursor = conn.cursor()\r\n sqlQuery = \"\"\r\n for line in sql_script.splitlines():\r\n if line.startswith('GO') and line.endswith('GO'):\r\n cursor.execute(sqlQuery)\r\n sqlQuery = ''\r\n else:\r\n sqlQuery = sqlQuery + \"\\n\" + line\r\n if len(sqlQuery.strip())>0:\r\n cursor.execute(sqlQuery)", "def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise", "def batch_execute(self, sql_list):\n with self.connection.cursor() as dbc:\n responses = []\n for sql in sql_list:\n dbc.execute(sql)\n responses.append(dbc.fetchall())\n return responses", "def execute_sql_file(file_name, session):\n sql_list = read_sql_file(file_name)\n for sql in sql_list:\n session.execute(sql)\n session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
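The metadata block above, repeated for every row, declares a single triplet objective over (query, document, negatives), with no paired or self objectives. Purely as an illustration, since the card does not prescribe a training framework, such a row could be unrolled into anchor/positive/negative triples for a contrastive or triplet loss; the sentence-transformers InputExample container used below is an assumption, not part of the dataset:

```python
from sentence_transformers import InputExample

def row_to_triplets(row):
    # One training example per negative: the query is the anchor, the document
    # is the positive, and each entry in negatives is a hard negative.
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"]
    ]

# A row with 20 negatives therefore yields 20 triples sharing the same
# anchor/positive pair, matching the "triplet": [["query", "document", "negatives"]] objective.
```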
Sets the project_name of this ShowProjectWorkHoursResponseBodyWorkHours. 项目名称
def project_name(self, project_name): self._project_name = project_name
[ "def set_project_name(self, name):\n self.project_tags[\"PROJECT_NAME\"] = name", "def project_name(self, project_name):\n \n self._project_name = project_name", "def project_name(self, project_name):\n\n self._project_name = project_name", "def evaluation_project_name(self, evaluation_project_name):\n self._evaluation_project_name = evaluation_project_name", "def setProject(self, projectname):\r\n projectdict = self.projectdict()\r\n for p in projectdict:\r\n if projectdict[p] == projectname:\r\n self.projectid = p", "def get_project_name(self):\n return self.name.text()", "def migration_project_name(self, migration_project_name):\n self._migration_project_name = migration_project_name", "def project_name(self) -> str:\n return pulumi.get(self, \"project_name\")", "def project_name(self):\n return self._project_name", "def update_project_name(self, curr_proj, proj_new_name):\r\n for proj in self.__projects:\r\n if proj == curr_proj: # Find the project with the same current name\r\n proj.update_name(proj_new_name) # Update the project's name\r", "def update_name(self, project: str, new_name: str) -> dict:\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$set': {\n 'name': new_name,\n }\n }\n )", "def _make_name_for_project(self, project):\n max_files_in_name = 3\n source_names = [f.file_display_name() for f in\n project.kit.source_files().order_by('created')]\n\n if source_names:\n project_name = '; '.join(source_names[:max_files_in_name])\n if len(source_names) > max_files_in_name:\n project_name = '%s; +%d more' % (\n project_name, len(source_names) - max_files_in_name)\n project.name = \"{0}: {1}\".format(settings.APP_SLUG_INSTANCE, project_name)", "def __str__(self):\n return self.project_name", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def rename_project(request):\n data = json.loads(request.body.decode('utf-8'))\n try:\n proj = models.Project.objects.get(pk=data['projid'])\n except models.Project.DoesNotExist:\n return JsonResponse({'error': f'Project with that ID does not exist in DB'}, status=404)\n # check if new project not already exist, and user have permission for all dsets\n proj_exist = models.Project.objects.filter(name=data['newname'])\n if proj_exist.count():\n if proj_exist.get().id == proj.id:\n return JsonResponse({'error': f'Cannot change name to existing name for project {proj.name}'}, status=403)\n else:\n return JsonResponse({'error': f'There is already a project by that name {data[\"newname\"]}'}, status=403)\n if is_invalid_proj_exp_runnames(data['newname']):\n return JsonResponse({'error': f'Project name cannot contain characters except {settings.ALLOWED_PROJEXPRUN_CHARS}'}, status=403)\n dsets = models.Dataset.objects.filter(runname__experiment__project=proj)\n if not all(check_ownership(request.user, ds) for ds in dsets):\n return JsonResponse({'error': f'You do not have the rights to change all datasets in this project'}, status=403)\n # queue jobs to rename project, update project name after that since it is needed in job for path\n create_job('rename_top_lvl_projectdir', newname=data['newname'], proj_id=data['projid'])\n proj.name = data['newname']\n proj.save()\n return JsonResponse({})", "def add_to_project_name(self, s):\n name = self.rootelement.get(\"projectName\")\n name_new = name + \" \" + s\n self.rootelement.set(\"projectName\",name_new)", "def source_project_name(self, source_project_name):\n self._source_project_name = source_project_name", "def team_name(self, team_name):\n\n self._team_name = team_name", "def set_teamname(self, new_name):\n self.teamname = new_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the nick_name of this ShowProjectWorkHoursResponseBodyWorkHours. 用户昵称
def nick_name(self): return self._nick_name
[ "def nick_name(self):\n if \"nickName\" in self._prop_dict:\n return self._prop_dict[\"nickName\"]\n else:\n return None", "def get_nick(self) -> str:\n return self.name", "def get_nickname(self):\n return self._nick", "def getNickname(self):\n return self.nickname", "def nickname(self):\n return self._iface.get_nickname(self.user_id)", "def nickname(self):\n return self._nickname", "def get_black_player_nickname(self, obj):\n return obj.black_player.nickname", "def get_white_player_nickname(self, obj):\n return obj.white_player.nickname", "def display_name(self) -> str:\n return self.nickname if isinstance(self.nickname, str) else self.username", "def branch_nick(self):\n return self._branch_nick", "def get_nickname_for_user(cls, user):\n return cls.get_account_for_user(user).nickname", "def get_username(problem_name, instance_number):\n\n return \"{}_{}\".format(sanitize_name(problem_name), instance_number)", "def retrieve_user_nickname(self):\n return User.query.filter_by(nickname=self.args['nickname']).first()", "def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )", "def get_name(self):\n return self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"name\", None)", "def _get_username(self):\n name = self._get_username_from_cookies()\n if name:\n return name\n if self._oauth and self._login_info[0]:\n return self._login_info[0]\n return self._get_username_from_api()", "def nickname():\n return jsonify(name=getRandomLine(nickNamesFile))", "def executor_nick_name(self):\n return self._executor_nick_name", "def _get_username_from_api(self):\n result = self.api_query(action=\"query\", meta=\"userinfo\")\n return result[\"query\"][\"userinfo\"][\"name\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the nick_name of this ShowProjectWorkHoursResponseBodyWorkHours. 用户昵称
def nick_name(self, nick_name): self._nick_name = nick_name
[ "def nick_name(self):\n if \"nickName\" in self._prop_dict:\n return self._prop_dict[\"nickName\"]\n else:\n return None", "def nick_name(self):\n return self._nick_name", "async def _nick(self, nick: str) -> str:\n\n logger.debug(f\"Setting nick to {nick!r}\")\n\n self._target_nick = nick\n\n reply = await self._connection.send(\"nick\", {\"name\": nick})\n data = self._extract_data(reply)\n\n new_nick = data[\"to\"]\n self._target_nick = new_nick\n\n if self._session is not None:\n self._session = self._session.with_nick(new_nick)\n\n logger.debug(f\"Set nick to {new_nick!r}\")\n\n return new_nick", "def nickChanged(self, nick):\n self.nickname = nick", "def get_nick(self) -> str:\n return self.name", "def nickname(self, value):\n self._nickname = value", "def nickname(self, nickname):\n\n self._nickname = nickname", "def sendnick(self):\n self._send(\"NICK %s\" % (CONFIG[\"nick\"]))", "def executor_nick_name(self, executor_nick_name):\n self._executor_nick_name = executor_nick_name", "def set_nickname(self, nickname):\n \n if len(nickname) > globals.MAX_NICKNAME_LENGTH:\n nick = nickname[0:globals.MAX_NICKNAME_LENGTH-3]+\"...\"\n else:\n nick = nickname\n \n self._nickname.set_message(nick)", "def nickname(self, nickname):\n if nickname is None:\n raise ValueError(\"Invalid value for `nickname`, must not be `None`\") # noqa: E501\n\n self._nickname = nickname", "def nick_change(self,user,new_nick,stanza):\r\n pass", "def nickname(self):\n self._nickname = None", "async def set_nick(\n client,\n event,\n user: ('user', 'Who\\'s?'),\n nick: P(str, 'Their new nick', min_length = 1, max_length = 32) = None,\n):\n yield\n await client.user_guild_profile_edit(event.guild, user, nick=nick)\n yield f'{user:f}\\'s nick has been updated'", "def nickname(self, new_nickname):\r\n self.set({\"nickname\": new_nickname})", "def get_nickname(self):\n return self._nick", "def change_username(self, name):\n self.username = name", "def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_date of this ShowProjectWorkHoursResponseBodyWorkHours. 工时日期
def work_date(self): return self._work_date
[ "def work_hours_created_time(self):\n return self._work_hours_created_time", "def _get_workdate(self):\n return self.currentEnv.get('workdate') or datetime.today()", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def work_hours_num(self):\n return self._work_hours_num", "def actual_work_hours(self):\n return self._actual_work_hours", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def work_date(self, work_date):\n self._work_date = work_date", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def expected_work_hours(self):\n return self._expected_work_hours", "def issue_date(self):\n return self._issue_date", "def get_work_address(self):\n return self._api_call('GET', 'v1.2/places/work')", "def create_record(self):\n return {\n \"date\": self.date_str,\n \"start_day\": self.args.work_hours[0].strftime(\"%H:%M\"),\n \"end_day\": self.args.work_hours[1].strftime(\"%H:%M\"),\n \"start_break\": self.args.break_time[0].strftime(\"%H:%M\"),\n \"end_break\": self.args.break_time[1].strftime(\"%H:%M\"),\n \"comment\": self.args.comment,\n \"special\": str(self.args.special)\n }", "def report_date(self) -> str | None:\n\n return self._report_date", "def hire_date(self):\n if \"hireDate\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"hireDate\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def issue_date(self) -> str:\n return pulumi.get(self, \"issue_date\")", "def get_week_date(self):\n if self.get_is_calendar_date():\n return get_week_date_from_calendar_date(self.year,\n self.month_of_year,\n self.day_of_month)\n if self.get_is_ordinal_date():\n return get_week_date_from_ordinal_date(self.year,\n self.day_of_year)\n if self.get_is_week_date():\n return self.year, self.week_of_year, self.day_of_week", "def GenerateReport():\n now = datetime.now(tz=tz.tzlocal())\n localized_now = now.astimezone(tz.gettz(keys.WORK_HOURS_CALENDAR_TZ))\n today = localized_now.replace(hour=0, minute=0, second=0, microsecond=0)\n today -= timedelta(weeks=FLAGS.end_weeks)\n start = today - timedelta(weeks=FLAGS.start_weeks)\n\n calendar_instance = calendar_client.Calendar()\n\n # Store report information as a list of dicts with the date, enter work time,\n # and exit work time, and timedelta between them.\n report = []\n\n for event in calendar_instance.GetEvents(\n keys.WORK_HOURS_CALENDAR_ID, start, today):\n startEntity = event.get('start')\n summary = event.get('summary')\n\n # Skip OOOs (out of office).\n if OOO_REGEX.match(summary):\n continue\n # Treat WFH as 8 hours worked.\n if WFH_REGEX.match(summary):\n startDate = dateutil.parser.parse(startEntity.get('date'))\n report.append({\n 'date': startDate.date(),\n 'start': startDate.replace(hour=9),\n 'end': startDate.replace(hour=17),\n 'delta': timedelta(hours=8)})\n # All other events should not be all-day events.\n if not startEntity.get('dateTime'):\n continue\n\n startTime = dateutil.parser.parse(startEntity.get('dateTime'))\n startDate = startTime.date()\n\n if report and report[-1]['date'] == startDate:\n report_entity = report[-1]\n report_entity['end'] = startTime\n report_entity['delta'] = (\n report_entity['end'] - report_entity['start'])\n else:\n report.append({'date': startDate, 'start': startTime})\n\n PrintReport(report)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_date of this ShowProjectWorkHoursResponseBodyWorkHours. 工时日期
def work_date(self, work_date): self._work_date = work_date
[ "def _set_workdate(self, workdate):\n self.currentEnv['workdate'] = workdate", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_date(self):\n return self._work_date", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def _get_workdate(self):\n return self.currentEnv.get('workdate') or datetime.today()", "def expected_work_hours(self, expected_work_hours):\n self._expected_work_hours = expected_work_hours", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def _set_date_weekly(self):\n dt_weekday = dt.now()\n try:\n dt_weekday = self._get_datetime_or_error()\n except ValueError:\n self._dt_string = \"\"\n raise InvalidDateError(detail={\n \"message\": \"Invalid Date Provided\",\n \"period\": self.period.value,\n \"date\": self._given_date\n })\n week_start = dt_weekday - timedelta(days=dt_weekday.weekday())\n self.date['year'] = week_start.year\n self.date['month'] = week_start.month\n self.date['day'] = week_start.day", "def dock_date(self, dock_date):\n self._dock_date = dock_date", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def work_experience(self, work_experience):\n\n self._work_experience = work_experience", "def test_work_time_line_changes_by_date_picker(self):\n\n month_from = '11'\n year_from = '2014'\n day_from = '01'\n\n month_to = '11'\n year_to = '2014'\n day_to = '31'\n\n days_count = 31\n\n self.create_page.ad_form\\\n .set_work_time_by_date_picker(self.create_page.ad_form.WORK_TIME_DATE_FROM,\n month_from,\n year_from,\n day_from)\n\n self.create_page.ad_form\\\n .set_work_time_by_date_picker(self.create_page.ad_form.WORK_TIME_DATE_TO,\n month_to,\n year_to,\n day_to)\n\n text = self.create_page.ad_form.get_work_time_line_text()\n actual_days_count = int(text.split()[0])\n\n self.assertEquals(days_count, actual_days_count)", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def set_datetime(self, date):\n self.date = date", "def set_date(self, date):\n self.date = date\n return", "def issue_date(self, issue_date):\n self._issue_date = issue_date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_num of this ShowProjectWorkHoursResponseBodyWorkHours. 工时花费
def work_hours_num(self): return self._work_hours_num
[ "def actual_work_hours(self):\n return self._actual_work_hours", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def expected_work_hours(self):\n return self._expected_work_hours", "def getWorkedHours(self):\n hours = 0\n for task in self.context:\n obj = self.context[task]\n hours += getMultiAdapter((obj, self.request),\n ifaces.ITaskStats).getWorkedHours()\n\n return hours", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def get_total_hours_from_workitems(workitems):\n return workitems.aggregate(Sum('hours'))['hours__sum'] or 0", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def hours(self):\n return int(self.minutes / 60)", "def node_hours(self) -> float:\n return pulumi.get(self, \"node_hours\")", "def getUserWorkedHours(self):\n hours = 0\n for log in self.context:\n if self.context[log].user == self.request.principal.id:\n hours += self.context[log].hours\n\n return hours", "def node_hours(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"node_hours\")", "def __total_hours(self):\n total_hours = 0\n for timesheet in self.timesheets.all():\n total_hours += timesheet.total_hours\n \n return total_hours", "def __total_hours(self):\n return (self.day_1_hours or 0) + (self.day_2_hours or 0) + (self.day_3_hours or 0) + \\\n (self.day_4_hours or 0) + (self.day_5_hours or 0) + (self.day_6_hours or 0) + (self.day_7_hours or 0)", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def orbit_hours(self):\n return self.orbital_period() * 24.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_num of this ShowProjectWorkHoursResponseBodyWorkHours. 工时花费
def work_hours_num(self, work_hours_num): self._work_hours_num = work_hours_num
[ "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def work_hours_num(self):\n return self._work_hours_num", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def expected_work_hours(self, expected_work_hours):\n self._expected_work_hours = expected_work_hours", "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def hours(self, new_hours):\n\n if (type(new_hours) != int and type(new_hours) != float) or new_hours < 0:\n raise ValueError(\n \"The number of working hours should be float or int and must be greater than or equal to zero\")\n\n self._hours = new_hours", "def actual_work_hours(self):\n return self._actual_work_hours", "def engine_hours(self, engine_hours):\n\n self._engine_hours = engine_hours", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def save_hours(validated_data):\n\n for w_h in validated_data['working_hours']:\n new_w_h = WorkingHoursSerializer(data={\n 'courier_id': validated_data['courier_id'],\n 'working_hours': w_h\n })\n if not new_w_h.is_valid():\n raise ValidationError(new_w_h.errors)\n new_w_h.save()", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def expected_work_hours(self):\n return self._expected_work_hours", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def set_timestep(self, timestep_per_hour):\n self.get_configuration(\"Timestep\")[0].Number_of_Timesteps_per_Hour = timestep_per_hour", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def extra_hours(self, new_extra_hours):\n\n if (type(new_extra_hours) != int and type(new_extra_hours) != float) or new_extra_hours < 0:\n raise ValueError(\n \"The number of extra working hours should be float or int and must be greater than or equal to zero\")\n\n self._extra_hours = new_extra_hours" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_type_name of this ShowProjectWorkHoursResponseBodyWorkHours. 工时类型
def work_hours_type_name(self): return self._work_hours_type_name
[ "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def work_hours_num(self):\n return self._work_hours_num", "def workpiece_type(self):\n return self._workpiece_type", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def get_teaching_hours(school_type):\n\tteaching_hours = {\n\t\t'primary':4,\n\t\t'primary_dc':4,\n\t\t'lower_secondary':6,\n\t\t'lower_secondary_dc':6,\n\t\t'upper_secondary':8,\n\t\t'secondary':8,\n\t\t'secondary_dc':8\n\t}\n\treturn teaching_hours[school_type]", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def actual_work_hours(self):\n return self._actual_work_hours", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def worker_type(self) -> str:\n return pulumi.get(self, \"worker_type\")", "def job_details_type(self) -> str:\n return pulumi.get(self, \"job_details_type\")", "def expected_work_hours(self):\n return self._expected_work_hours", "def incident_type_name(self):\n return self._incident_type_name", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def race_type_name(self):\n return self._race_types.get(self.race_type, None)", "def _get_type_name(self):\n return self._type_name", "def work_timing(self):\n from .timing import WorkEntry\n work = self['Work']\n if work is None or work == '':\n work = []\n else:\n work = work.split('\\n')\n ret = []\n for w in work:\n e = WorkEntry.from_str(w)\n e.code = self.code\n ret.append(e)\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_type_name of this ShowProjectWorkHoursResponseBodyWorkHours. 工时类型
def work_hours_type_name(self, work_hours_type_name): self._work_hours_type_name = work_hours_type_name
[ "def work_hours_type_name(self):\n return self._work_hours_type_name", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def workpiece_type(self, workpiece_type):\n self._workpiece_type = workpiece_type", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def expected_work_hours(self, expected_work_hours):\n self._expected_work_hours = expected_work_hours", "def test_update_work_type_name(self):\n # login as manager\n self.authenticate(self.manager)\n\n # pre-assert the work type has a given name\n self.assertEqual(self.wt1.name, \"WorkType1\")\n\n # alter the work type\n response = self.client.patch(self.url_wt1, {\"name\": \"NewName\"})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # assert the name changed\n self.assertEqual(WorkType.objects.get(id=self.wt1.id).name, \"NewName\")", "def team_set_name(self, team_type: TeamType, team_name):\r\n\t\tself._teams[team_type].team_name = team_name\r\n\t\t_logger.info(\"Set the name of team {0} to \\\"{1}\\\".\" \\\r\n\t\t\t.format(team_type, team_name))", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def hw_type(self, hw_type):\n if self.local_vars_configuration.client_side_validation and hw_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `hw_type`, must not be `None`\") # noqa: E501\n\n self._hw_type = hw_type", "def work_hours_num(self):\n return self._work_hours_num", "def job_name_type(self, job_name_type):\n self._job_name_type = job_name_type", "def incident_type_name(self, incident_type_name):\n self._incident_type_name = incident_type_name", "def tier_type_name(self, tier_type_name):\n\n self._tier_type_name = tier_type_name", "def worker_type(self, worker_type):\n \n self._worker_type = worker_type", "def type_name(self, type_name):\n\n self._type_name = type_name", "def measurement_type_name(self, measurement_type_name):\n\n self._measurement_type_name = measurement_type_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the issue_id of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项id
def issue_id(self): return self._issue_id
[ "def issue_id(self) -> str:\n return self._yaml[\"commit\"][\"issue_id\"]", "def find_issue_id(self):", "def workitem_id(self):\n return self._workitem_id", "def get_issue(self):\n issue_id = self.kwargs['issue_id']\n try:\n issue = Issue.objects.get(pk=issue_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n if issue.project.pk != self.project.pk:\n raise ObjectNotFound('Not found')\n return issue", "def id(self):\n return str(self._github_issue.number)", "def get_problemID(self):\n\t\treturn self.prDoc['about']['problemID']", "def work_hours_num(self):\n return self._work_hours_num", "def issueNumber(self):\n return self.__issueNumber", "def get_last_worked_on_step_id(self):\n logger.debug(\"Searching for ID of the step last worked on.\")\n last_id = None\n for step in self.steps:\n if any((task for task in step.tasks if task.status == \"DONE\")) and (not last_id or step.id > last_id):\n last_id = step.id\n if not last_id:\n raise ValueError(\"No ID is found for last worked on step for ticket {}\".format(self.id))\n return last_id", "def issue(self, issue_id):\n return Issue.find(issue_id, self)", "def GetIssue(self):\n if self.issue is None and not self.lookedup_issue:\n if self.GetBranch():\n self.issue = self._GitGetBranchConfigValue(ISSUE_CONFIG_KEY)\n if self.issue is not None:\n self.issue = int(self.issue)\n self.lookedup_issue = True\n return self.issue", "def _get_workout_id(username):\n workout_id = select(u.current_workout_id for u in UserInformationData if u.username == username).first()\n\n return workout_id", "def get_issue(self, issue_id):\n try:\n json = self.get('repos/%(owner)s/%(repo)s/issues/%(issue_id)d' % {\n 'owner': self.repo_owner,\n 'repo': self.repo_name,\n 'issue_id': issue_id,\n })\n\n label_list = [label_dict['name'] for label_dict in json['labels']]\n\n return Issue(json['number'], label_list)\n except ResourceNotFound:\n return None", "def employee_id(self) -> int:\n return self._employee_id", "def ticket_id(self):\n return self._ticket_id", "def eihealth_project_id(self):\n return self._eihealth_project_id", "def JournalIssue(self, default=None):\n return self.data.get('journal_issue', default)", "def get_worklog(issue):\n\tworklogs = []\n\tworklog_field = issue.get('fields', {}).get('worklog', False)\n\n\tif worklog_field:\n\t\tworklogs = worklog_field.get('worklogs', [])\n\n\treturn worklogs", "def employee_id(self) -> str:\n return pulumi.get(self, \"employee_id\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the issue_id of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项id
def issue_id(self, issue_id): self._issue_id = issue_id
[ "def issue_id(self):\n return self._issue_id", "def issue_id(self) -> str:\n return self._yaml[\"commit\"][\"issue_id\"]", "def workitem_id(self, workitem_id):\n self._workitem_id = workitem_id", "def SetIssue(self, issue=None):\n assert self.GetBranch()\n if issue:\n issue = int(issue)\n self._GitSetBranchConfigValue(ISSUE_CONFIG_KEY, str(issue))\n self.issue = issue\n codereview_server = self.GetCodereviewServer()\n if codereview_server:\n self._GitSetBranchConfigValue(\n CODEREVIEW_SERVER_CONFIG_KEY, codereview_server)\n else:\n # Reset all of these just to be clean.\n reset_suffixes = [\n LAST_UPLOAD_HASH_CONFIG_KEY,\n ISSUE_CONFIG_KEY,\n PATCHSET_CONFIG_KEY,\n CODEREVIEW_SERVER_CONFIG_KEY,\n GERRIT_SQUASH_HASH_CONFIG_KEY,\n ]\n for prop in reset_suffixes:\n try:\n self._GitSetBranchConfigValue(prop, None)\n except subprocess2.CalledProcessError:\n pass\n msg = RunGit(['log', '-1', '--format=%B']).strip()\n if msg and git_footers.get_footer_change_id(msg):\n print('WARNING: The change patched into this branch has a Change-Id. '\n 'Removing it.')\n RunGit(['commit', '--amend', '-m',\n git_footers.remove_footer(msg, 'Change-Id')])\n self.lookedup_issue = True\n self.issue = None\n self.patchset = None", "def issue(self, issue):\n\n self._issue = issue", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def add_issue(self, issue):\r\n\t\tself[\"issues\"][issue[\"id\"]] = issue", "def cmd_open_issue(work_item_id: Union[str, int]) -> None:\n work_item_id = str(work_item_id).lstrip(\"#\")\n\n project = get_config(\"project\")\n organization = get_config(\"organization\")\n\n click.launch(f\"{organization}/{project}/_workitems/edit/{work_item_id}\")", "def issue(self, issue_id):\n return Issue.find(issue_id, self)", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def find_issue_id(self):", "def challenge_id(self, challenge_id):\n\n self._challenge_id = challenge_id", "def _find_issue(self, message, issue_id):\n url = \"%s/issues/%s.json\" % (self.redmine_url, issue_id)\n response = request.get(url)\n if response.status_code != 200:\n return\n\n try:\n issue = response.json()[\"issue\"]\n except Exception:\n return\n\n msg = \"RM %s #%s: %s [Status: %s, Priority: %s, Assignee: %s]\" % (\n issue[\"tracker\"][\"name\"],\n issue[\"id\"],\n issue[\"subject\"],\n issue[\"status\"][\"name\"],\n issue[\"priority\"][\"name\"],\n issue.get(\"assigned_to\", {}).get(\"name\", \"N/A\"))\n url = \"https://%s/issues/%s\" % (self.redmine_domain, issue[\"id\"])\n\n message.dispatch(\"%s %s\" % (msg, url))", "def ticket_id(self, ticket_id):\n self._ticket_id = ticket_id", "def get_issue_with_id(self, issue_id):\n url = self._api(\"/rest/api/2/issue/%s\" % issue_id)\n return requests.get(url, auth=self.auth)", "def issue_time(self, issue_time):\n\n self._issue_time = issue_time", "def answer_issue(self, issue: int, option: int) -> etree.Element:\n issueEffect = self.shards_xml(c=\"issue\", issue=str(issue), option=str(option))[\n \"issue\"\n ]\n return issueEffect", "def open_id_issues_url(self, open_id_issues_url):\n\n self._open_id_issues_url = open_id_issues_url", "def workitem_id(self):\n return self._workitem_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the issue_type of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项类型
def issue_type(self): return self._issue_type
[ "def get_issue_type(issue):\n\tissue_type = ''\n\tif 'issuetype' in issue['fields'] and issue['fields']['issuetype'] is not None:\n\t\tissue_type = issue['fields']['issuetype'].get('name', '')\n\treturn issue_type", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def type(self):\n return 'issue'", "def issue_types(self):\n return ['issue']", "def workpiece_type(self):\n return self._workpiece_type", "def type(self) -> Optional[pulumi.Input['TestIssueType']]:\n return pulumi.get(self, \"type\")", "def problem_type(self):\n return self._problem_type", "def getIssueType(self):\n return 0 # https://portswigger.net/burp/help/scanner_issuetypes.html", "def issue_type(self, issue_type):\n self._issue_type = issue_type", "def get_issue(self):\n issue_id = self.kwargs['issue_id']\n try:\n issue = Issue.objects.get(pk=issue_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n if issue.project.pk != self.project.pk:\n raise ObjectNotFound('Not found')\n return issue", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def orcid_work_type(self):\n inspire_doc_type = get_value(self.record, \"document_type[0]\")\n return self.INSPIRE_DOCTYPE_TO_ORCID_TYPE[inspire_doc_type]", "def _quality_issue_type():\n return {\n 'name' : 'quality_issue_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('metadata', None),\n ('data_format', None),\n ('data_content', None),\n ('data_indexing', None),\n ('science', None),\n ],\n }", "def get_type_of_habit(self,obj):\n type_of_habit = obj.habit.type_of_habit\n return type_of_habit", "def job_details_type(self) -> str:\n return pulumi.get(self, \"job_details_type\")", "def get_worklog(issue):\n\tworklogs = []\n\tworklog_field = issue.get('fields', {}).get('worklog', False)\n\n\tif worklog_field:\n\t\tworklogs = worklog_field.get('worklogs', [])\n\n\treturn worklogs", "def problem_type(self, problem_type):\n if problem_type is None:\n raise ValueError(\"Invalid value for `problem_type`, must not be `None`\") # noqa: E501\n\n self._problem_type = problem_type", "def GetIssue(self):\n if self.issue is None and not self.lookedup_issue:\n if self.GetBranch():\n self.issue = self._GitGetBranchConfigValue(ISSUE_CONFIG_KEY)\n if self.issue is not None:\n self.issue = int(self.issue)\n self.lookedup_issue = True\n return self.issue", "def issue(self, issue_id):\n return Issue.find(issue_id, self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the issue_type of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项类型
def issue_type(self, issue_type): self._issue_type = issue_type
[ "def issue_set_type(self, issue, type):", "def workpiece_type(self, workpiece_type):\n self._workpiece_type = workpiece_type", "def problem_type(self, problem_type):\n if problem_type is None:\n raise ValueError(\"Invalid value for `problem_type`, must not be `None`\") # noqa: E501\n\n self._problem_type = problem_type", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def issue_type(self):\n return self._issue_type", "def issue_time(self, issue_time):\n\n self._issue_time = issue_time", "def issue_types(self):\n return ['issue']", "def get_issue_type(issue):\n\tissue_type = ''\n\tif 'issuetype' in issue['fields'] and issue['fields']['issuetype'] is not None:\n\t\tissue_type = issue['fields']['issuetype'].get('name', '')\n\treturn issue_type", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def type(self):\n return 'issue'", "def issue(self, issue):\n\n self._issue = issue", "def type(self) -> Optional[pulumi.Input['TestIssueType']]:\n return pulumi.get(self, \"type\")", "def hw_type(self, hw_type):\n if self.local_vars_configuration.client_side_validation and hw_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `hw_type`, must not be `None`\") # noqa: E501\n\n self._hw_type = hw_type", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def SetIssue(self, issue=None):\n assert self.GetBranch()\n if issue:\n issue = int(issue)\n self._GitSetBranchConfigValue(ISSUE_CONFIG_KEY, str(issue))\n self.issue = issue\n codereview_server = self.GetCodereviewServer()\n if codereview_server:\n self._GitSetBranchConfigValue(\n CODEREVIEW_SERVER_CONFIG_KEY, codereview_server)\n else:\n # Reset all of these just to be clean.\n reset_suffixes = [\n LAST_UPLOAD_HASH_CONFIG_KEY,\n ISSUE_CONFIG_KEY,\n PATCHSET_CONFIG_KEY,\n CODEREVIEW_SERVER_CONFIG_KEY,\n GERRIT_SQUASH_HASH_CONFIG_KEY,\n ]\n for prop in reset_suffixes:\n try:\n self._GitSetBranchConfigValue(prop, None)\n except subprocess2.CalledProcessError:\n pass\n msg = RunGit(['log', '-1', '--format=%B']).strip()\n if msg and git_footers.get_footer_change_id(msg):\n print('WARNING: The change patched into this branch has a Change-Id. '\n 'Removing it.')\n RunGit(['commit', '--amend', '-m',\n git_footers.remove_footer(msg, 'Change-Id')])\n self.lookedup_issue = True\n self.issue = None\n self.patchset = None", "def experiment_type(self, experiment_type):\n \n self._experiment_type = experiment_type", "def defect_type(self, defect_type):\n\n self._defect_type = defect_type", "def hu_type(self, hu_type):\n self._hu_type = hu_type", "def worker_type(self, worker_type):\n \n self._worker_type = worker_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the closed_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项结束时间
def closed_time(self): return self._closed_time
[ "def actual_work_hours(self):\n return self._actual_work_hours", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def end_time(self):\n return self._end_time", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def get_end_hour(self) -> str:\n return self.end_hour", "def time_end(self):\n return self._time_end", "def close_time(self):\n return dt.time(23, 59)", "def get_end_time(self):\n if self.invalid():\n return None\n interval = self.get_interval(ARBITRARY_DATE)\n return interval.end.time()", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def cal_end(self):\n return self.datetime_end", "def end_time(self):\n return from_ts(self.end_time_ms)", "def open_hours_detail(self):\n return self._open_hours_detail", "def end_time(self) -> Union[str, float]:\n if self.get_from_redis(\"end_time\") != \"None\":\n return float(self.get_from_redis(\"end_time\"))\n return \"None\"", "def work_hours_num(self):\n return self._work_hours_num", "def active_end_time(self):\n return self._active_end_time", "def ends(self):\n return self.time_end", "def get_end_time(self):\n if \"end\" in self.run and self.run[\"end\"] > 0:\n return datetime.fromtimestamp(self.run['end'] / 1000)\n else:\n raise ValueError(\"Scenario run has not completed\")", "def closed_at(self):\n return string_to_datetime(self._dict.get('closed_at'))", "def weekend_end(self) -> int:\n return self._data['week_data']['weekend_end']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the closed_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项结束时间
def closed_time(self, closed_time): self._closed_time = closed_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def closed_time(self):\n return self._closed_time", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def open_hours_detail(self, open_hours_detail):\n\n self._open_hours_detail = open_hours_detail", "def closed(self, closed):\n\n self._closed = closed", "def case_close_time(self, operator: Enum, case_close_time: Arrow | datetime | int | str):\n case_close_time = self.util.any_to_datetime(case_close_time).strftime('%Y-%m-%d %H:%M:%S')\n self._tql.add_filter('caseCloseTime', operator, case_close_time, TqlType.STRING)", "def close(self, end_time: int = None):", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def bookable_end_date_time(self, bookable_end_date_time):\n\n self._bookable_end_date_time = bookable_end_date_time", "def close_stat(stat_id, end_time):\n Statistic.where('id', '=', stat_id).update(endTime=end_time)", "def actual_close_date(self, actual_close_date):\n\n self._actual_close_date = actual_close_date", "def set_end_time(td, end_time):\n\n td.setEndTime(end_time)", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def _set_end_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"end_time must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__end_time = t\n if hasattr(self, '_set'):\n self._set()", "def finish_time(self, finish_time):\n self._finish_time = finish_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_created_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时创建时间
def work_hours_created_time(self): return self._work_hours_created_time
[ "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def created_time(self):\n return self._created_time", "def create_time(self):\n return self._create_time", "def work_hours_num(self):\n return self._work_hours_num", "def actual_work_hours(self):\n return self._actual_work_hours", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def created_time(self) -> float:\n return self._created_time", "def get_creation_time(self):\n return self.get_attr('date_created')", "def _get_created_time(issue) -> str:\n return issue.created_at.strftime(TIME_FMT)", "def get_creation_time(self):\n return self.creation_time", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def _job_get_create_time(self, job_id):\n # check if we can / should update\n if (self.jobs[job_id]['gone'] is not True) \\\n and (self.jobs[job_id]['create_time'] is None):\n self.jobs[job_id] = self._job_get_info(job_id=job_id)\n\n return self.jobs[job_id]['create_time']", "def creation_date_time(self):\n return self._creation_date_time", "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def creation_time_str(self):\n return \"%Y/%m/%d %I:%M:%S\".format(self.creation_time)", "def expected_work_hours(self):\n return self._expected_work_hours", "def get_time(self):\n return self.block.create_time", "def team_creation_time_utc(self) -> str:\n return pulumi.get(self, \"team_creation_time_utc\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_created_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时创建时间
def work_hours_created_time(self, work_hours_created_time): self._work_hours_created_time = work_hours_created_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def time_created(self, time_created):\n self._time_created = time_created", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def created_time(self, created_time):\n\n self._created_time = created_time", "def created_time(self, created_time):\n self._created_time = created_time", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def created_time(self, created_time: float):\n\n self._created_time = created_time", "def expected_work_hours(self, expected_work_hours):\n self._expected_work_hours = expected_work_hours", "def create_time(self, create_time):\n\n self._create_time = create_time", "def creation_time(self, creation_time):\n\n self._creation_time = creation_time", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def created_date_time(self, created_date_time):\n\n self._created_date_time = created_date_time", "def creation_date_time(self, creation_date_time):\n self._creation_date_time = creation_date_time", "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def time_of_creation(self, time_of_creation):\n \n self._time_of_creation = time_of_creation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_updated_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时更新时间
def work_hours_updated_time(self): return self._work_hours_updated_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def updated_time(self):\n return self._updated_time", "def update_time(self):\n return self._update_time", "def last_update_time(self):\n return self._last_update_time", "def actual_work_hours(self):\n return self._actual_work_hours", "def get_update_time(self):\n return self._utime", "def last_update(self: DetailedForecast) -> datetime:\n return self.update_time", "def last_end_update_time(self) -> str:\n return pulumi.get(self, \"last_end_update_time\")", "def last_update_time_in_minutes(self):\n return self._last_update_time_in_minutes", "def updated(self) -> datetime:\n return datetime.strptime(self.data['updated_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')", "def get_last_updated_at(self):\n return self.last_updated", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def updated_datetime(self) -> datetime:\n return utc_to_local(self._db_data.updated_datetime)", "def updated_at(self):\n return self._updated_at", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def modified_time(self) -> float:\n return self._modified_time", "def work_hours_num(self):\n return self._work_hours_num", "def last_updated(self):\n return self._last_updated" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_updated_time of this ShowProjectWorkHoursResponseBodyWorkHours. The time the work hours were updated.
def work_hours_updated_time(self, work_hours_updated_time):
    self._work_hours_updated_time = work_hours_updated_time
[ "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def updated_time(self, updated_time):\n self._updated_time = updated_time", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def update_time(self, update_time):\n\n self._update_time = update_time", "def update_time(self, update_time):\n self._update_time = update_time", "def last_updated_time(self, last_updated_time):\n\n self._last_updated_time = last_updated_time", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def setLastTimeUpdated(self, tg):\n self.last_time_updated = tg", "def updated_time(self):\n return self._updated_time", "def expected_work_hours(self, expected_work_hours):\n self._expected_work_hours = expected_work_hours", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def set_update_time(self, *args, **kwargs):\n return _qtgui_swig.time_sink_c_sptr_set_update_time(self, *args, **kwargs)", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours", "def actual_work_hours(self):\n return self._actual_work_hours", "def setTime(self, timeObj, day=None):\n\n # override day if it's None\n if not day:\n day = getDayFromNum(timeObj.weekday())\n\n self._fileCache[day][\"time-hr\"] = timeObj.hour\n self._fileCache[day][\"time-min\"] = timeObj.minute\n self._updateConfig()", "def set_update_time(self, *args, **kwargs):\n return _qtgui_swig.time_sink_f_sptr_set_update_time(self, *args, **kwargs)", "def set_update_time(self, *args, **kwargs):\n return _qtgui_swig.time_sink_c_set_update_time(self, *args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return default zoom setting.
def _defaultZoom(self):
    return (-1.0, 1.0, -1.0, 1.0)
[ "def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom", "def zoom(self) -> Zoom:\n return self.__zoom", "def zoom(self) -> float:\n return self._zoom", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def google_defaults():\n return {\n \"zoom\": \"18\",\n \"size\": \"629x629\",\n \"maptype\": \"satellite\"\n }", "def zoomValue(self):\n self.parent.zoom = self.parent.ui.spinBox_zoom.value()\n self.anypoint_view()", "def zoom(self, value):\n if value >= 0 and value <= 1.0:\n self.__zoom = value", "def get_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][1]", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def action_set_zoom(self, value):\n if value >= 0 and value < len(self.zoom_levels) and value != self.cur_zoom:\n self.cur_zoom = value\n self.apply_zoom()", "def set_zoom(self, zoom):\n self.widget.zoomTo(zoom)", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def zoom(self):\n\n map_size = {\"height\": 900, \"width\": 1900}\n max_zoom = 21 # maximum zoom level based on Google Map API\n world_dimension = {'height': 256, 'width': 256} # min map size for entire world\n\n latitudes = [lat for lat, lon, *rest in self._points]\n longitudes = [lon for lat, lon, *rest in self._points]\n\n # calculate longitude span between east and west\n delta = max(longitudes) - min(longitudes)\n if delta < 0:\n lon_span = (delta + 360) / 360\n else:\n lon_span = delta / 360\n\n # calculate latitude span between south and north\n lat_span = (self._lat_rad(max(latitudes)) - self._lat_rad(min(latitudes))) / math.pi\n\n # get zoom for both latitude and longitude\n zoom_lat = math.floor(math.log(map_size['height'] / world_dimension['height'] / lat_span) / math.log(2))\n zoom_lon = math.floor(math.log(map_size['width'] / world_dimension['width'] / lon_span) / math.log(2))\n\n return min(zoom_lat, zoom_lon, max_zoom)-1", "def DefaultKM(self):\n return self._default_km", "def __zoomReset(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomTo(0)\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(0)\n self.sbZoom.setValue(aw.getZoom())", "def test_map_settings_default():\n m = view(world)\n assert m.location == [\n pytest.approx(-3.1774349999999956, rel=1e-6),\n pytest.approx(2.842170943040401e-14, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == True\n assert m.position == \"relative\"\n assert m.height == (100.0, \"%\")\n assert m.width == (100.0, \"%\")\n assert m.left == (0, \"%\")\n assert m.top == (0, \"%\")\n assert m.global_switches.no_touch is False\n assert m.global_switches.disable_3d is False\n assert \"openstreetmap\" in m.to_dict()[\"children\"].keys()", "def zoom_level(self) -> ZoomLevel:\n resp = self._stub.GetZoomLevel(self._camera)\n return 
ZoomLevel(resp.level)", "def scale():\n if viewMode == Mode_Isometric:\n return scale2d\n if viewMode == Mode_Perspective:\n return scale3d", "def OnZoomSet(self, event):\n MINZ = 0\n MAXZ = 1000\n newzoom_scale = None\n num = self.zoom.GetSelection()\n if num >= 0: # selection from list\n newzoom_scale = self.zoom.GetClientData(num)\n else: # combo text\n astring = self.zoom.GetValue().strip().replace('%','') # ignore percent sign\n try:\n numvalue = float(astring)\n if numvalue < MINZ or numvalue > MAXZ:\n numvalue = None\n except ValueError:\n numvalue = None\n if numvalue: # numeric value\n newzoom_scale = numvalue/100.0\n else: # valid text?\n textvalue = self.zoom.GetValue()\n for k in range(len(self.comboval)):\n if textvalue.lower() == self.comboval[k][0].lower():\n newzoom_scale = self.comboval[k][1]\n break\n\n if newzoom_scale:\n self.viewer.SetZoom(newzoom_scale) # will send update to set zoomtext\n else:\n self.zoom.SetValue(self.zoomtext) # restore last good value\n event.Skip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Approximates a root of this function using a single iteration of Newton's method.
def newtonsMethod(self, x, a):
    return x - a * (self._f(x) / self._df(x))
[ "def inexact_newton(f,x0,delta = 1.0e-7, epsilon=1.0e-6, LOUD=False):\n x = x0\n if (LOUD):\n print(\"x0 =\",x0)\n iterations = 0\n while (np.fabs(f(x)) > epsilon):\n fx = f(x)\n fxdelta = f(x+delta)\n slope = (fxdelta - fx)/delta\n if (LOUD):\n print(\"x_\",iterations+1,\"=\",x,\"-\",fx,\"/\",slope,\"=\",x - fx/slope)\n x = x - fx/slope\n iterations += 1\n print(\"It took\",iterations,\"iterations\")\n return x #return estimate of root", "def newtonsMethod( func, guess, iterations = 10, **kwargs ):\n\n x0 = D( guess )\n\n for i in range( iterations ):\n x0 = x0 - D( func( x0 ) / definiteDerivative( func, x0, **kwargs ) )\n\n return x0", "def NewtonsMethod(f, df, x, max_iter=100, eps=0.00001): \n y = f(x)\n dy = df(x)\n \n for i in range(max_iter):\n \n # First, check if we have df(x) is not close to 0. If it is zero, we can't\n # find a solution.\n if np.isclose(df(x), 0.0, eps):\n print(f\"Solution can't be found df(x) = {df(x)}\")\n return None\n \n # Find new guess\n x_ = x - f(x) / df(x)\n \n # Check if we found a root \n if np.abs(np.isclose(f(x), 0.0, eps)):\n return x_\n \n # If not, update \n x = x_\n \n # print(f\"Newton did not converge\")\n return None", "def newton(f, xinit, tol, N):\n if f(xinit) < tol:\n return xinit\n else:\n n = 1\n while n < N:\n xnew = xinit - (f(xinit) / derivative(f, xinit))\n if abs(f(xnew)) < tol:\n print('Root found. Number of iterations: ', n)\n return xnew\n break\n else:\n xinit = xnew\n n = n + 1\n else:\n return 'Max iterations reached. No root found within chosen tolerance.'", "def newton_sqrt1(x):\n val = x\n while True:\n last = val\n val = (val + n / val) * 0.5\n if abs(val - last) < 1e-9:\n break\n return val", "def rootfind_newton(func, x0, a, b, maxiter=50, tol=1.0e-11):\n\n for iter in xrange(maxiter):\n\n fval, fpval, args = func(x0)\n # print \"x0=%.4f fval=%.2e fpval=%.2e [%.4f, %.4f]\" % (x0, fval, fpval, a, b)\n\n if fval < 0:\n a = x0\n else:\n b = x0\n\n x = x0 - fval/fpval\n if not (a < x < b):\n # Once we have bracketed the root, we don't allow the\n # iterations to leave the bracket.\n x = 0.5*(a+b)\n\n if np.abs(x-x0) < tol or np.abs(fval) < tol:\n break\n\n x0 = x\n\n return x, fval, iter, args", "def newton_method(function, function_prime, guess = 1.234, num_iter = 8):\n\txi = guess\n\tfor i in range(num_iter):\n\t\tf_xi = function(xi)\n\t\tfp_xi = function_prime(xi)\n\n\t\txi -= f_xi / fp_xi\n\n\treturn x", "def newton_method(f, x, Ep, step):\n\n while True:\n step = step + 1\n # print(\"bnd1:=\",bnd1)\n h = f(x) / derivative(f, x)\n x = x - h\n if (decide(abs(h) <= Ep)):\n break\n # print(\"Root in Approximation: \",bnd1)\n return step", "def newton_iteration(f: Callable, df: Callable, eps: float, x0: float = None, a: float = None, b: float = None,\n weight: float = 0.9, display: bool = False, max_iterations: int = 100) -> float:\n x = np.inf\n if x0 is None:\n x0 = (a + b) / 2\n if a is not None and b is not None and a == b:\n return a\n x_next = x0\n iterations = 0\n while abs(x - x_next) > eps and iterations < max_iterations:\n iterations += 1\n x = x_next\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a or x-1\n xx1 = b or x+1\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n f_x = f(x)\n try:\n df_x = df(x)\n except ZeroDivisionError:\n df_x = (f_x - f(x-eps))/eps\n if df_x != 0:\n x_next = x - f_x / df_x\n\n if a is not None and x_next < a:\n x_next = weight * a + (1 - weight) * x\n elif b is not None and x_next > b:\n x_next = 
weight * b + (1 - weight) * x\n\n if a is not None and x_next < a:\n x_next = a\n if b is not None and x_next > b:\n x_next = b\n\n return x_next", "def newton(n):\r\n \r\n tol = 0.000000001\r\n guess = 1.0\r\n\r\n while True:\r\n guess = (guess + n / guess) / 2\r\n diff = abs(n - guess ** 2)\r\n if diff <= tol:\r\n break\r\n return guess", "def newton_rhapson(func,x0,deriv=None,tol=1e-15,maxiter=20):\n #import pdb\n #pdb.set_trace()\n i = maxiter\n if deriv is None:\n deriv = lambda x: 1e15*(func(x+1e-15)-func(x))\n error = np.inf\n while (np.abs(error)>tol) and (i>0):\n dx = -func(x0)/deriv(x0)\n error = dx/x0\n x0 = x0+dx\n i -= 1\n root = x0\n if i==0:\n raise UserWarning(\"Root not found to specified tolerance within {} iterations\".format(maxiter))\n return root\n else:\n return root", "def newton(f, df, x, n=5):\n ret = [x]\n for i in range(n):\n if df(x) == 0:\n break\n o = x - (f(x) / df(x))\n ret.append(o)\n x = o\n return ret", "def newton1d(f, df, ddf, x, niter=10):\n for i in xrange(niter):\n x_new = x - df(x)/ddf(x)\n x = x_new\n return x", "def newton_raphson(x0, e=0.00001, total_iterations=15):\n steps_taken = 0\n checker = 1\n condition = True\n\n while condition:\n if g(x0) == 0.0: \n print(\"Zero division\")\n break\n\n x1 = x0 - f(x0)/g(x0)\n x0=x1\n steps_taken+=1\n\n if steps_taken > total_iterations:\n checker = 0\n break\n \n condition = abs(f(x1)) > e\n # print(x1, condition, 'iterations: ', steps_taken)\n\n if checker==1:\n print(\"Newton Raphson Mtd: next root at \", x1) \n else:\n print(\" Not convergent\")\n return x1", "def quasi_newtons_method(f, initial_guess, max_iter = 10000, method = 'BFGS', tol = 1e-12):\n \n if method not in ['BFGS', 'DFP', 'Broyden']:\n raise Exception(\"Not a valid method.\")\n x = initial_guess\n H = np.identity(len(x))\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n\n p = -H @ gradient\n \n alpha = line_search(f, x, p)\n delta_x = alpha * p\n\n x = x + delta_x\n x_vector2 = ad.create_vector('x', x)\n fn_at_x2 = f(x_vector2)\n gradient2 = fn_at_x2.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if np.sqrt(np.abs(gradient2).sum()) < tol:\n break\n y = (gradient2 - gradient).reshape(-1, 1)\n delta_x = delta_x.reshape(-1, 1)\n if method == 'BFGS':\n H = (np.identity(len(H)) - (delta_x @ y.T) / (y.T @ delta_x)) @ H \\\n @ (np.identity(len(H)) - (y @ delta_x.T) / (y.T @ delta_x)) + (delta_x @ delta_x.T) / (y.T @ delta_x)\n elif method == 'DFP':\n H = H + (delta_x @ delta_x.T) / (delta_x.T @ y) - (H @ y @ y.T @ H) / (y.T @ H @ y)\n elif method == 'Broyden':\n H = H + ((delta_x - H @ y) @ delta_x.T @ H) / (delta_x.T @ H @ y)\n\n return (x, i + 1)", "def newton_raphson(x1, fx1, f1):\n return x1 - f1/fx1", "def newton_raphson(f, x, ER, N):\n #Autor: Estefano Ramos C.I: V-267778542\n #Fecha: 05/11/2019\n #Versión: 1.0.1\n\n df = derivada(f) #Se obtine la derivada de la funcion.\n xi = x #Se crea la variable que almacenara la aproximación de la raiz y se le asigna la aproximación inicial como el primer valor a iterar.\n x_1= 0 #Se crea la variable auxiliar para almacenar la aproximación actual.\n ea=ER+1 #Se crea la variable que almacena el error acumulado, se inicializa con el valor minimo del error relativo + 1 para que no falle el programa.\n i=0 #se crea un contador para saber en que iteracion estamos\n\n print(\"Aproximación inicial:\", xi) #Mensaje inicial.\n\n while ( i<=N and ea>ER): #Inicio de un 
loop mientras el contador sea menor al numero maximo de iteraciones y el error aprozimado no sea el optimo.\n\n i= i + 1 #Incrementa el contador.\n \n x_1= xi-(f(xi)/df(xi)) #Se calcula la siguiente aproximación.\n\n ea=abs((x_1-xi)/x_1) #Se calcula el nuevo error.\n\n xi = x_1 #A la aproximación actual se le asigna la siguiente.\n\n print(\"Iteración:\", i, \"Aproximación:\", xi, \"Error:\", ea) #Estado de las variables por iteracion.\n \n return xi #Valor de salida.", "def newton(self):\n data = self.data\n tau0 = self.tau0\n a0 = self.a0\n x = np.array([np.full(10000, tau0), np.full(10000, a0)])\n x[:, 1] = x[:, 0] - np.dot(data.inv_hessian(tau0, a0), data.gradient(tau0, a0))\n tau = []\n a = []\n for i in range(0, 500):\n x[:,i] = x[:,i-1] - np.dot(data.inv_hessian(x[0, i-1], x[1, i-1]), data.gradient(x[0, i-1], x[1, i-1]))\n if data.NLL(x[0,i], x[1,i]) < data.NLL(x[0,i-1], x[1, i-1]):\n tau.append(x[:, i][0])\n a.append(x[:, i][1])\n else: \n break\n result = Parameters(tau, a, self.data, \"Newton Method\")\n return result", "def _newton(start_value, num_to_vrsqrt):\n\n x0_square = topi.multiply(start_value, start_value)\n mul_res = topi.multiply(x0_square, num_to_vrsqrt)\n mul_res = topi.multiply(mul_res, tvm.const(-1, \"float32\"))\n head0_tmp = topi.add(mul_res, tvm.const(3, \"float32\"))\n head0 = topi.multiply(head0_tmp, start_value)\n newton_res = topi.multiply(head0, tvm.const(0.5, \"float32\"))\n\n return newton_res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset):
    soln_real = adjustRange(fractal[0], 0, 127)
    soln_imag = adjustRange(fractal[1], 0, 127)
    iters = adjustRange(fractal[2], 0, 128)
    rgb_image = np.array([
        soln_real + iters,
        soln_imag + iters,
        iters
    ]).astype(dtype=np.uint8)
    return rgb_image.T
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def asRGB(self):\r\n\r\n width, height, pixels, meta = self.asDirect()\r\n if meta['alpha']:\r\n raise Error(\"will not convert image with alpha channel to RGB\")\r\n if not meta['greyscale']:\r\n return width, height, pixels, meta\r\n meta['greyscale'] = False\r\n typecode = 'BH'[meta['bitdepth'] > 8]\r\n\r\n def iterrgb():\r\n for row in pixels:\r\n a = array(typecode, [0]) * 3 * width\r\n for i in range(3):\r\n a[i::3] = row\r\n yield a\r\n return width, height, iterrgb(), meta", "def bgr2rgb(image):\n return image[..., [2, 1, 0]]", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def get_color_image():\n\n led = LED(4)\n led.on()\n\n output = np.empty((480, 640, 3), dtype=np.uint8)\n camera.capture(output, \"rgb\")\n led.off()\n return output", "def GetRGBArray(self, p_int):\n ...", "def _get_observation_img(self):\n img_arr = self.p.getCameraImage(width=self._width + 20,\n height=self._height + 10,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix,\n shadow=0, lightAmbientCoeff=0.6,lightDistance=100,lightColor=[1,1,1],lightDiffuseCoeff=0.4,lightSpecularCoeff=0.1,renderer=self.p.ER_TINY_RENDERER\n)\n\n rgb = img_arr[2][:-10,20:,:3]\n np_img_arr = np.reshape(rgb, (self._height, self._width, 3))\n return np_img_arr", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", "def piltoarray(img):\r\n pil_image = transforms.ToPILImage()(img).convert(\"RGB\")\r\n return np.array(pil_image)", "def _decode_image(self, row_image):\n length = int(self.image_size ** 2)\n red = row_image[:length].reshape(32, 32)\n green = row_image[length:length * 2].reshape(32, 32)\n blue = row_image[length * 2:].reshape(32, 32)\n return np.stack([red, green, blue], axis=2)", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def imageToArray(i):\n a=gdalnumeric.fromstring(i.tostring(),'b')\n a.shape=i.im.size[1], i.im.size[0]\n return a", "def array_to_image(arrs: np.array):\n for i, 
arr in enumerate(arrs):\n img = Image.fromarray(arr).convert('RGB')\n with open('imgs/{}.png'.format(i), 'wb') as f:\n img.save(f)", "def greyscale(state):\n state = np.reshape(state, [210, 160, 3]).astype(np.float32)\n\n # grey scale\n state = state[:, :, 0] * 0.299 + state[:, :, 1] * 0.587 + state[:, :, 2] * 0.114\n\n # karpathy\n state = state[35:195] # crop\n state = state[::2,::2] # downsample by factor of 2\n\n state = state[:, :, np.newaxis]\n\n return state.astype(np.uint8)", "def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out", "def convert_color(self, img, conv):", "def _read_color_image(self):\n # read raw buffer\n im_arr = self._color_stream.read_frame()\n raw_buf = im_arr.get_buffer_as_triplet()\n r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)]) \n g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)]) \n b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)]) \n\n # convert to uint8 image\n color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH, 3])\n color_image[:,:,0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n color_image[:,:,1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n color_image[:,:,2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,\n PrimesenseSensor.COLOR_IM_WIDTH)\n if self._flip_images:\n color_image = np.flipud(color_image.astype(np.uint8))\n else:\n color_image = np.fliplr(color_image.astype(np.uint8))\n return ColorImage(color_image, frame=self._frame)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking the return values for `start` and `end` when calling channel_messages with a message count that is not a multiple of 50.
def test_channel_messages_unlimited_pagination():
    clear()
    userOne = auth_register('firstuser@gmail.com', '123abc!@#', 'First', 'User')
    randChannel = channels_create(userOne['token'], 'randChannel', True)
    for _ in range(149):
        message_send(userOne['token'], randChannel['channel_id'], 'Hello')
    messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)
    assert(messages['start'] == 0)
    assert(messages['end'] == 50)
    messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)
    assert(messages2['start'] == 50)
    assert(messages2['end'] == 100)
    messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)
    assert(messages3['start'] == 100)
    assert(messages3['end'] == -1)
    assert(len(messages3['messages']) == 49)
    # an error should be raised when start is beyond 149 messages
    with pytest.raises(InputError):
        channel_messages(userOne['token'], randChannel['channel_id'], 150)
[ "def channel_messages(token, channel_id, start):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n raise error.AccessError(description=\"user is not a member of this channel\")\n\n #get channel data\n curr_channel = database.get_channel_data(channel_id)\n # find the length of messages\n messages_length = len(curr_channel[\"messages\"])\n\n # if start is after the oldest message in messages InputError is raised\n # if messages is called and start is 0 on an empty channel, it returns an empty channel.\n # if start is after the oldest message in messages InputError is raised\n\n if messages_length <= start and (messages_length != 0 or start > 0):\n raise error.InputError(description=\"\"\"The start value selected is\n past the oldest message in the list\"\"\")\n\n if messages_length == 0 and start == 0:\n return {\"messages\": [], \"start\": start, \"end\": -1}\n\n # get the list of dictionaries 'message'\n curr_messages = curr_channel[\"messages\"]\n messages_returned = []\n\n end = start + 50\n num_msgs_to_check = messages_length - start\n\n # If end is larger than the total no. of messages,\n # the function will print till end and return -1\n if num_msgs_to_check < 50:\n\n counter = 0\n while counter < num_msgs_to_check:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n end = -1\n # else if end is within total no of messages,\n # function will print 50 messaages from start and return start + 50\n else:\n # loop to add each message to return up till 50 messages is returned\n counter = 0\n while counter < 50:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n for msg in messages_returned:\n for react in msg['reacts']:\n react['is_this_user_reacted'] = curr_id in react['u_ids']\n\n return {\"messages\": messages_returned, \"start\": start, \"end\": end}", "def check_range(self):\n start, end = self.ranges.get()\n for i in XRANGE(long(start), long(end)):\n if self.found:\n return\n attempt = str(i).zfill(NUM_DIGITS)\n if self.check_hash(attempt):\n self.client.send_msg(mysocket.DATA_SEPARATOR.join([mysocket.SUCCESS_REPLY, attempt]))\n self.found = True\n return", "def channel_messages(token, channel_id, start):\n # Convert 'start' to an integer\n start = int(start)\n\n # Validate token and channel ID\n if not validator.is_valid_token(token):\n raise AccessError(INVALID_TOKEN)\n if not validator.is_valid_channel(channel_id):\n raise ValueError(INVALID_CHANNEL)\n\n # Locate channel\n channel = database.get_channel_by_id(channel_id)\n\n # Validate that token user is in channel\n u_id = jwt_handler.decode_token(token)\n if u_id not in channel[\"auth_ids\"]:\n raise AccessError(CHANNEL_CANT_VIEW_MSG)\n\n # Validate messages exist\n total_messages = len(channel['messages'])\n if total_messages <= 0:\n return {\n \"messages\": [],\n \"start\": start,\n \"end\": -1\n }\n\n # Validate start point\n if start > (total_messages - 1) or start < 0:\n raise ValueError(CHANNEL_NO_MORE_MSG)\n\n # Find index for the most recent message (reference list backwards)\n start_index = len(channel['messages']) - 1 - start\n # Get all recent messages up to 50\n messages = []\n end = start + 49\n for msg_num in range(50):\n index = start_index - msg_num\n # If there are less 
than 50 msgs\n if index < 0:\n end = -1\n break\n\n # Get message object\n message_id = channel['messages'][index]\n message = database.get_message_by_id(message_id)\n\n # If message is sent later or not there\n if message is None or\\\n json_time_translator.json_to_datetime(message[\"time_created\"]) >= datetime.utcnow():\n continue\n\n # Create output-able list of reacts\n react_list = []\n reacted_ids = []\n all_reacts = database.get_all_reacts(message_id)\n react_id = 1\n for react in all_reacts:\n is_this_user_reacted = False\n if react[\"react_id\"] == react_id:\n reacted_ids.extend(react[\"u_id\"])\n if u_id in react[\"u_id\"]:\n is_this_user_reacted = True\n # Only add the reaction if it has at least one count of a user\n react_list.append({\n \"react_id\": react_id,\n \"u_ids\": reacted_ids,\n \"is_this_user_reacted\": is_this_user_reacted\n })\n\n # print(message[\"time_created\"])\n # print(json_time_translator.json_to_datetime(message[\"time_created\"]))\n # print(json_time_translator.json_to_timestamp(message[\"time_created\"]))\n # Append to file\n messages.append({\n \"message_id\": message_id,\n \"u_id\": message[\"u_id\"],\n \"message\": message[\"message\"],\n \"time_created\": json_time_translator.json_to_timestamp(message[\"time_created\"]),\n \"reacts\": react_list,\n \"is_pinned\": message[\"is_pinned\"]\n })\n\n return {\n \"messages\": messages,\n \"start\": start,\n \"end\": end\n }", "def test_load_channels_with_number_above_upper_bound():\n try:\n switch.load_channels(\"200\", upper_bound=99)\n assert False, \"load_channels() should have failed\"\n except Exception as error:\n assert isinstance(error, switch.LoadError)\n assert \"must have fewer than 100 channels\" in error.message", "def test_call_limits(self, data, start, stop):\n filter = RangeFilter(\"value\", start, stop)\n if start is None:\n expect = data if data[\"value\"] < stop else None\n elif stop is None:\n expect = data if start <= data[\"value\"] else None\n else:\n assert False # shouldn't get here\n assert expect == filter(data)\n return", "def check_channel_limit(self):\n\n db = database.Database(config.sqlite_path)\n metric_dict = db.get_channel_count_metric()\n if len(metric_dict.keys()) > 105:\n for dest in config.channels_admin:\n if not dest.startswith('#'):\n dest = \"#\" + dest\n logging.warning(\"!!!--- \"+ str(dest))\n db.enqueue_msg(dest, \"ALERT! 
Channel Limit Approaching\")", "def check_availability(self, start, end):\n if start < end:\n return 0\n else:\n print(self.buffer.shape)\n return\n # if sampleStart < 0 or sampleEnd <= 0:\n # return 5\n # if sampleEnd > self.nSamplesWritten:\n # return 3 # data is not ready\n # if (self.nSamplesWritten - sampleStart) > self.bufSize:\n # return 2 # data is already erased\n #\n # return 0", "def testNumMessagesInConversation(self):\n\n # Make sure we get the last message\n expected = len(self.msg_keys)\n actual = gcimessage_logic.numMessagesInConversation(\n conversation=self.conv.key)\n self.assertEqual(expected, actual)\n\n # Add a new message, one minute after the last one\n self.conv_utils.addMessage(\n conversation=self.conv.key,\n time=(self.msg_keys[-1].get().sent_on + timedelta(minutes=1)))\n\n # Make sure we get the new last message\n expected = len(self.msg_keys) + 1\n actual = gcimessage_logic.numMessagesInConversation(\n conversation=self.conv.key)\n self.assertEqual(expected, actual)", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_standup_send_length():\n\n user = auth.auth_register(\"email@email.com\", \"password\", \"Samus\", \"Aran\")\n channel_dict = channels.channels_create(user['token'], \"test_channel\", False)\n\n standup.standup_start(user['token'], channel_dict['channel_id'], 1)\n\n with pytest.raises(error.InputError):\n standup.standup_send(user['token'], channel_dict['channel_id'], 'a' * 1001)\n time.sleep(1)", "def test_messages_wrapped(self):\n\n self.assertEqual(self.iterations + 1, len(self.messages))", "def test_args_count_in_range(args: list, min: int, max: int) -> bool:\n\n\tcount = args_count(args)\n\treturn (count >= min and count <= max)", "def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def test_check_limit(self):\n self.assertEqual(functions.check_limit('2'), 2)", "def test_chunk_message_data_chunks_data_for_n(self):\n articles = [fake for fake in range(16)]\n expected = [[n for n in range(8)], [n for n in range(8, 16)]]\n tested = self.client._chunk_message_data(articles, 8)\n assert tested == expected", "def check_if_50s_and_20s(self,value):\n if (value%50)%20!=0:\n return 1\n else:\n return 0", "def test_session5_squared_power_list_start_gt_end():\n with pytest.raises(ValueError, match=r\".*Value of start should be less than end*\"):\n session5.squared_power_list(2, start=9, end=1)", "def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):\n messages_and_futures = [] # [(message, produce_future),]\n for i in number_range:\n # request.node.name provides the test name (including parametrized values)\n encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')\n future = kafka_producer.send(topic, value=encoded_msg, partition=partition)\n 
messages_and_futures.append((encoded_msg, future))\n kafka_producer.flush()\n for (msg, f) in messages_and_futures:\n assert f.succeeded()\n return [msg for (msg, f) in messages_and_futures]", "def test_get_inbound_for_time_range_no_end(self):\n yield self.start_server()\n batch_id = yield self.make_batch(('foo', 'bar'))\n mktime = lambda day: datetime(2014, 11, day, 12, 0, 0)\n yield self.make_inbound(batch_id, 'føø', timestamp=mktime(1))\n msg2 = yield self.make_inbound(batch_id, 'føø', timestamp=mktime(2))\n msg3 = yield self.make_inbound(batch_id, 'føø', timestamp=mktime(3))\n msg4 = yield self.make_inbound(batch_id, 'føø', timestamp=mktime(4))\n resp = yield self.make_request(\n 'GET', batch_id, 'inbound.json', start='2014-11-02 00:00:00')\n messages = map(\n json.loads, filter(None, resp.delivered_body.split('\\n')))\n self.assertEqual(\n set([msg['message_id'] for msg in messages]),\n set([msg2['message_id'], msg3['message_id'], msg4['message_id']]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking that an AccessError is raised if the user is already in the channel.
def test_channel_join_already_in_channel():
    clear()
    user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    with pytest.raises(AccessError):
        channel_join(user['token'], userchannel_id['channel_id'])
[ "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def in_channel(name):\n def predicate(ctx):\n if ctx.message.channel.is_private:\n return False\n\n # Adapted from discord.ext.core.has_any_role\n user_roles = functools.partial(discordutils.get, ctx.message.author.roles)\n if any(user_roles(name=role) is not None for role in ['Admin', 'Moderator', 'Support']):\n return True\n\n return ctx.message.channel.name == name\n\n return commands.check(predicate)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def user_exists(userid):", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def test_message_send_not_in_c():\n user_infor1 = auth_register(\"337992611@gamil.com\", \"ccc337992611\", \"Min\", \"Li\")\n user_infor2 = auth_register('testmeplz@gmail.com', 'ccc337992611', 'A', 'H')\n\n channel_infor_2 = channels_create(user_infor2['token'], 'test_two', True)\n\n with pytest.raises(AccessError):\n message_send(user_infor1['token'], channel_infor_2['channel_id'],\n 'a'*100)", "def userLeft(self, user, channel):\n pass", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "async def interaction_check(self, interaction: discord.Interaction):\n owner_id = interaction.guild.owner.id\n if interaction.user.id not in (\n owner_id,\n *interaction.client.owner_ids,\n ):\n await interaction.response.send_message(\n content=_(\"You are not authorized to interact with this.\"), ephemeral=True\n )\n return False\n return True", "def test_standup_send_not_member():\n\n user1 = auth.auth_register(\"email1@email.com\", \"password\", \"Night\", \"Man\")\n 
user2 = auth.auth_register(\"email2@email.com\", \"password\", \"Day\", \"Man\")\n channel_dict = channels.channels_create(user1['token'], \"test_channel\", False)\n\n standup.standup_start(user1['token'], channel_dict['channel_id'], 1)\n\n with pytest.raises(error.AccessError):\n standup.standup_send(user2['token'], channel_dict['channel_id'], \"message\")\n time.sleep(1)", "def already_logged_in(oauth_user, oauth_service):\n try:\n created = current_user.add_oauth_identity(oauth_user.service_name, oauth_user.service_user_id)\n if created:\n message = 'Linked your ' + oauth_service.value + ' account to your CatHerder account!'\n else:\n message = 'Your ' + oauth_service.value + ' account is already linked to a CatHerder user.'\n return current_user, message, True\n except Exception as e:\n return None, e.message, False", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def vc_only():\n\n async def check(ctx):\n if ctx.guild and ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"invalidemail@gmail.com\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the channel is private and no invite is given to the user, the owner of flockr can still join the channel.
def test_channel_join_private_owner():
    clear()
    joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last')
    user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', False)
    channel_join(joiner['token'], userchannel_id['channel_id'])
    randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
    assert(randChannel_details['all_members'] == [
        {
            'u_id' : user['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        },
        {
            'u_id' : joiner['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        }
    ])
[ "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")", "def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def channel_invite(token, channel_id, u_id):\n authorised_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n user_to_invite = users.get(u_id)\n if user_to_invite is None:\n raise ValueError(\"u_id does not exist.\")\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if authorised_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n channels.set(channel_id, \"all_members\", u_id)", "def channel_invite(token, channel_id, u_id):\n\n if database.get_current_user(token) not in database.get_channel_data(channel_id)['member_ids']:\n raise error.AccessError(description=\"\"\"Authorised user is not\n a member of channel with that channel_id.\"\"\")\n if u_id in database.get_channel_data(channel_id).get('member_ids'):\n raise error.InputError(description=\"This user is already a part of the channel.\")\n\n new_channel_data = database.get_channel_data(channel_id)\n\n new_channel_data['member_ids'].append(u_id)\n if database.get_permission_dict(u_id).get('permission_id') == 1:\n new_channel_data['owner_ids'].append(u_id)\n\n database.set_channel_data(new_channel_data)\n\n return {}", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with 
pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def in_channel(name):\n def predicate(ctx):\n if ctx.message.channel.is_private:\n return False\n\n # Adapted from discord.ext.core.has_any_role\n user_roles = functools.partial(discordutils.get, ctx.message.author.roles)\n if any(user_roles(name=role) is not None for role in ['Admin', 'Moderator', 'Support']):\n return True\n\n return ctx.message.channel.name == name\n\n return commands.check(predicate)", "def channel_invite(token, channel_id, u_id):\n # Convert channel_id and u_id to integer\n channel_id = int(channel_id)\n u_id = int(u_id)\n\n # Validate token, channel and user\n if not validator.is_valid_token(token):\n raise AccessError(INVALID_TOKEN)\n if not validator.is_valid_channel(channel_id):\n raise ValueError(INVALID_CHANNEL)\n if not validator.is_valid_user(u_id):\n raise ValueError(INVALID_USER)\n\n # Find invitor\n invitor_id = jwt_handler.decode_token(token)\n\n # Locate channel in database\n channel = database.get_channel_by_id(channel_id)\n\n # Check that the invitor is authorised\n auth_list = channel[\"auth_ids\"]\n\n if invitor_id not in auth_list:\n raise AccessError(CHANNEL_INV_NO_AUTH)\n if u_id in auth_list:\n raise ValueError(CHANNEL_ALREADY_JOINED)\n auth_list.append(u_id)\n database.update_channel_by_id(channel_id, {\n \"auth_ids\": auth_list\n })\n return {}", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the 
channel,\n or of the slackr\"\"\")", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "async def invite(self, ctx):\r\n await self.bot.send('debug', embed=self.bot.buildEmbed(title=\"Invite Request\", description=\"{} ▫️ {}\".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.avatar_url, timestamp=datetime.utcnow(), color=self.color))\r\n await ctx.author.send(embed=self.bot.buildEmbed(title=ctx.guild.me.name, description=\"{}\\nYou'll have to wait for my owner approval.\\nMisuses will result in a ban.\".format(self.bot.strings[\"invite()\"]), thumbnail=ctx.guild.me.avatar_url, timestamp=datetime.utcnow(), color=self.color))", "def user_is_channel_member(channel_id: int, auth_user_id: int) -> bool: \n channel = get_channel(channel_id)\n for user in channel['all_members']:\n if auth_user_id == user['u_id']:\n return True\n return False", "def userJoined(self, user, channel):\n pass", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_invite_accept_other_user(self):\n i = InviteFactory.create()\n\n response = self.client.post(reverse('resturo_organization_join'),\n {\"token\": i.token,\n \"action\": JoinSerializer.JOIN_ACCEPT})\n\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n # There should be a membership now\n self.assertTrue(Membership.objects.filter(user=self.u,\n organization=i.organization\n ).exists())\n # The invite should be gone\n self.assertEquals(Invite.objects.count(), 0)", "def _on_join(self, connection, event):\n channel = event.target\n nick = event.source.nick\n # if it's the bot itself joining the channel\n if nick == connection.get_nickname():\n # create a new channel in the channels dictionary\n self.channels[event.target] = IRCChannel(ircbot=self, name=channel)\n self.channels[event.target].showbans = self.settings['showbans']\n self.channels[event.target].showgame = self.settings['showgame']\n self.channels[event.target].showkicks = self.settings['showkicks']\n\n # add the user to the channel user list\n self.channels[channel].add_user(nick=nick)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking that an InputError is raised when attempting to add a user as an owner who is already an owner.
def test_channel_addowner_already_an_owner():
    clear()
    auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')
    channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
    with pytest.raises(InputError):
        assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def verifyOwner(owner_id, this_user_id):\n if not (owner_id == this_user_id):\n raise ValueError(\n \"You don't have permission to edit this.\")", "def test_input_admin_owner_change_first_owner_to_member(user_1, user_2):\n admin_userpermission_change(user_1[\"token\"], user_2[\"u_id\"], OWNER)\n with pytest.raises(InputError):\n admin_userpermission_change(user_2[\"token\"], user_1[\"u_id\"], MEMBER)\n clear()", "def test_input_admin_first_owner_changes_to_member(user_1):\n with pytest.raises(InputError):\n admin_userpermission_change(user_1[\"token\"], user_1[\"u_id\"], MEMBER)\n clear()", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def validate_owner(ctx, param, value):\n # pylint: disable=unused-argument\n form = \"OWNER\"\n return validate_slashes(param, value, minimum=1, maximum=1, form=form)", "def validate_username(self, username):\n user_obj = Users.query.filter_by(Username=username.data).first()\n if user_obj:\n raise ValidationError(\"Username Already Exists. Choose Another.\")", "def _validate_ip_owner(ip, mac, row_number):\n mac = MACAddressField.normalize(mac)\n try:\n dev = Device.admin_objects.get(ethernet__mac=mac)\n except Device.DoesNotExist:\n if ip_address_exists(ip):\n raise forms.ValidationError(\n \"Row %s: IP address already exists.\" % row_number\n )\n else:\n # Does another device have this IPAddress?\n if(Device.objects.filter(\n ipaddress__number=int(ipaddr.IPAddress(ip)),\n ).exclude(\n pk=dev.id,\n ).exists()):\n raise forms.ValidationError(\n \"Row %s: IP address used by another device.\" % row_number\n )", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_05_add_duplicate_user(self):\n\n # Validate the following\n # 1. Enable a VPN connection on source NAT\n # 2. Add a VPN user say \"abc\" that already an added user to the VPN.\n # 3. 
Adding this VPN user should fail.\n\n self.debug(\"Enabling the VPN connection for IP: %s\" %\n self.public_ip.ipaddress)\n self.create_VPN(self.public_ip)\n\n self.debug(\"Adding new VPN user to account: %s\" %\n self.account.name)\n self.create_VPN_Users(rand_name=False)\n\n # TODO: Verify the VPN connection\n self.debug(\"Adding another user to account with same username\")\n with self.assertRaises(Exception):\n self.create_VPN_Users(rand_name=False)\n return", "def _is_username_taken(self, username):\n\t\tfor e_o in self._entity_owners:\n\t\t\tif e_o.username == username:\n\t\t\t\treturn us.SUCCESS_MESSAGE\n\t\treturn us.ERROR_MESSAGE", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def validate_username(self, data, value):\n\n store = goldman.sess.store\n existing = store.find(data['rtype'], 'username', value)\n\n if existing:\n if not data['rid'] or data['rid'] != existing.rid:\n raise ValidationError('username is already taken')", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def username_entered(username):\n if username not in users:\n users.append(username)", "def is_owner(ctx):\n return ctx.author.id in owners", "async def interaction_check(self, interaction: discord.Interaction):\n owner_id = interaction.guild.owner.id\n if interaction.user.id not in (\n owner_id,\n *interaction.client.owner_ids,\n ):\n await interaction.response.send_message(\n content=_(\"You are not authorized to interact with this.\"), ephemeral=True\n )\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_addowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_lock_not_owned(self):\n response = self.app.get('/check/321', headers=self.auth_header(\"test@mail.com\", \"python\"))\n self.assertEqual(403, response.status_code)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_util_has_perm_or_owns_sanity(self):\n from kitsune.forums.tests import ThreadFactory\n me = UserFactory()\n my_t = ThreadFactory(creator=me)\n other_t = ThreadFactory()\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, my_t.forum)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, other_t.forum)\n eq_(allowed, False)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def my_owner():\n async def predicate(ctx):\n return ctx.guild is not None and ctx.me.guild_permissions.read_messages \\\n and ctx.author.id == 688917796943560734\n return commands.check(predicate)", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last')\n user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "async def interaction_check(self, interaction: discord.Interaction):\n owner_id = interaction.guild.owner.id\n if interaction.user.id not in (\n owner_id,\n *interaction.client.owner_ids,\n ):\n await interaction.response.send_message(\n content=_(\"You are not authorized to interact with this.\"), ephemeral=True\n )\n return False\n return True", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def _check_access(self):\r\n return self._is_access_free", "def test_pin_msg_not_owner():\n data = helper_create_react()\n # Create second user and join the channel with the 
message.\n user_infor = auth_register('337992611@qq.com', 'HereyouAreMyP', 'not', 'exit')\n channel_join(user_infor['token'], data['channel_id'])\n with pytest.raises(AccessError):\n message_pin(user_infor['token'], data['message_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "def test_edit_msg_no_access():\n data = create_pinned_message()\n user2 = auth_register(\"ema222@email.com\", \"password\", \"Bill\", \"Bill\")\n channel_join(user2['token'], data['channel_id'])\n with pytest.raises(AccessError):\n message_edit(user2['token'], data['message_id'], \"test\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if removing an owner with an invalid user ID raises an InputError
def test_channel_removeowner_invalid_user_id(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') with pytest.raises(InputError): assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], "invalidemail@gmail.com")
[ "def verifyOwner(owner_id, this_user_id):\n if not (owner_id == this_user_id):\n raise ValueError(\n \"You don't have permission to edit this.\")", "def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)", "def test_cannot_remove_sole_owner(self):\n # Get the current sole owner for the project\n collaborator = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first()\n # Authenticate as the owner from the discovered collaborator\n self.client.force_authenticate(user = collaborator.user)\n # Trying to delete the owner record should result in a conflict\n response_data = self.assertConflict(\n \"/collaborators/{}/\".format(collaborator.pk),\n \"DELETE\"\n )\n # Check that the error code is what we expected\n self.assertEqual(response_data['code'], 'sole_owner')\n # Test that the collaborator was not removed\n self.assertTrue(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def test_leave_single_owner_denied(member_service, community, owner):\n data = {\"members\": [{\"type\": \"user\", \"id\": str(owner.id)}]}\n pytest.raises(\n ValidationError,\n member_service.delete,\n owner.identity,\n community._record.id,\n data,\n )", "def dropUser(userID):", "def remove():\r\n ch = input('You are about to REMOVE an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n'))).lower().capitalize()\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n'))).lower().capitalize()\r\n\r\n if not search2(xln, xfn):\r\n print('No entry exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return remove()\r\n\r\n ch2 = input('Are you sure you wish to remove this individual from the database? 
YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been removed from the database.')\r\n with conn:\r\n c.execute(\"\"\"DELETE from personnel WHERE first=:first COLLATE NOCASE and last=:last COLLATE NOCASE\"\"\",\r\n {'first': xfn, 'last': xln})\r\n\r\n start()\r\n else:\r\n print('Your remove action has been cancelled.')\r\n start()\r\n else:\r\n start()", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def delete_user():", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def validate_owner(ctx, param, value):\n # pylint: disable=unused-argument\n form = \"OWNER\"\n return validate_slashes(param, value, minimum=1, maximum=1, form=form)", "def test_can_remove_owner_when_multiple_owners(self):\n # Make an extra owner for the project\n collaborator = self.project.collaborators.create(\n user = get_user_model().objects.create_user('owner2'),\n role = Collaborator.Role.OWNER\n )\n # Authenticate the client as an owner of the project\n self.authenticateAsProjectOwner(collaborator.project)\n # Try to delete them\n self.assertDeleteResponseIsEmpty(\"/collaborators/{}/\".format(collaborator.pk))\n # Test that the collaborator was removed\n self.assertFalse(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def test_input_admin_owner_change_first_owner_to_member(user_1, user_2):\n admin_userpermission_change(user_1[\"token\"], user_2[\"u_id\"], OWNER)\n with pytest.raises(InputError):\n admin_userpermission_change(user_2[\"token\"], user_1[\"u_id\"], MEMBER)\n clear()", "def test_input_admin_first_owner_changes_to_member(user_1):\n with pytest.raises(InputError):\n admin_userpermission_change(user_1[\"token\"], user_1[\"u_id\"], MEMBER)\n clear()", "def 
_delete_entity_owner(self, username):\n\t\t#us.log('Entity server is deleting the following owner : { ' + username + ' }')\n\t\towners_to_remove = []\n\t\tfor e_o in self._entity_owners:\n\t\t\tif e_o.username == username:\n\t\t\t\towners_to_remove.append(e_o)\n\t\tfor e_o in owners_to_remove:\n\t\t\tself._entity_owners.remove(e_o)\n\t\tself._db_api.delete_owner(username)\n\t\treturn us.SUCCESS_MESSAGE", "def handle_owner_delete(owner_id):\n\n owner = Owner.find_by_id(owner_id)\n # flash error message if owner does not exist\n if not owner:\n flash(f'Owner does not exist!', 'danger')\n return 'not deleted', 404\n # flash error message if owner still has existing content\n elif owner.contents:\n flash(f'{owner.owner_name} still has existing content!', 'danger')\n return 'not deleted', 400\n\n # owner is deleted and user is redirected (redirect code in owners.js)\n # deleting owner errors handled\n try:\n owner.delete_owner()\n except HTTPException:\n return \"Server cannot delete the owner at this time\", 500\n\n flash(f'{owner.owner_name} has been deleted!', 'success')\n return 'deleted', 202", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if removing an owner without owner permissions raises an AccessError
def test_channel_removeowner_not_owner_permissions(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def test_cannot_remove_sole_owner(self):\n # Get the current sole owner for the project\n collaborator = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first()\n # Authenticate as the owner from the discovered collaborator\n self.client.force_authenticate(user = collaborator.user)\n # Trying to delete the owner record should result in a conflict\n response_data = self.assertConflict(\n \"/collaborators/{}/\".format(collaborator.pk),\n \"DELETE\"\n )\n # Check that the error code is what we expected\n self.assertEqual(response_data['code'], 'sole_owner')\n # Test that the collaborator was not removed\n self.assertTrue(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def has_remove_permissions(self, obj):\n return True", "def test_can_remove_owner_when_multiple_owners(self):\n # Make an extra owner for the project\n collaborator = self.project.collaborators.create(\n user = get_user_model().objects.create_user('owner2'),\n role = Collaborator.Role.OWNER\n )\n # Authenticate the client as an owner of the project\n self.authenticateAsProjectOwner(collaborator.project)\n # Try to delete them\n self.assertDeleteResponseIsEmpty(\"/collaborators/{}/\".format(collaborator.pk))\n # Test that the collaborator was removed\n self.assertFalse(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. 
Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_leave_single_owner_denied(member_service, community, owner):\n data = {\"members\": [{\"type\": \"user\", \"id\": str(owner.id)}]}\n pytest.raises(\n ValidationError,\n member_service.delete,\n owner.identity,\n community._record.id,\n data,\n )", "def verifyOwner(owner_id, this_user_id):\n if not (owner_id == this_user_id):\n raise ValueError(\n \"You don't have permission to edit this.\")", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_access_admin_not_owner_else_member(user_1, user_2, user_3):\n admin_userpermission_change(user_1[\"token\"], user_3[\"u_id\"], OWNER)\n with pytest.raises(AccessError):\n admin_userpermission_change(user_2[\"token\"], user_3[\"u_id\"], MEMBER)\n clear()", "def test_permission_remove_action_not_granted(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove anonymous TICKET_CREATE')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def delete(self, using=None):\n from organizations.exceptions import OwnershipRequired\n if self.organization.owner.organization_user.id == self.id:\n raise OwnershipRequired(_(\"Cannot delete organization owner before organization or transferring ownership\"))\n else:\n super(OrganizationUser, self).delete(using=using)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def _delete_entity_owner(self, username):\n\t\t#us.log('Entity server is deleting the following owner : { ' + username + ' }')\n\t\towners_to_remove = []\n\t\tfor e_o in self._entity_owners:\n\t\t\tif e_o.username == username:\n\t\t\t\towners_to_remove.append(e_o)\n\t\tfor e_o in owners_to_remove:\n\t\t\tself._entity_owners.remove(e_o)\n\t\tself._db_api.delete_owner(username)\n\t\treturn us.SUCCESS_MESSAGE", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def test_iam_project_ownership_delete(self):\n pass", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if able to remove an owner who is the last owner of the channel
def test_channel_removeowner_last_owner(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) #register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) # removing third user channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n 
await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def channel_removeowner(token, channel_id, u_id):\n # Convert channel_id and u_id to integer\n channel_id = int(channel_id)\n u_id = int(u_id)\n\n # Validate token, channel and user\n if not validator.is_valid_token(token):\n raise AccessError(INVALID_TOKEN)\n if not validator.is_valid_channel(channel_id):\n raise ValueError(INVALID_CHANNEL)\n if not validator.is_valid_user(u_id):\n raise ValueError(INVALID_USER)\n\n # Locate channel and user in database\n channel = database.get_channel_by_id(channel_id)\n auth_user_id = jwt_handler.decode_token(token)\n auth_user = database.get_user_by_id(auth_user_id)\n owner_list = channel[\"owner_ids\"]\n\n if auth_user[\"permission\"] not in [OWNER, ADMIN] or\\\n auth_user_id not in channel[\"owner_ids\"]:\n raise AccessError(CHANNEL_DEL_OWNER_NO_AUTH)\n\n # Check that the added_user is currently an owner\n if u_id not in owner_list:\n raise ValueError(CHANNEL_NOT_OWNER)\n\n # Remove user from owner_ids\n owner_list.remove(u_id)\n database.update_channel_by_id(channel_id, {\n \"owner_ids\": owner_list\n })\n\n return {}", "def test_can_remove_owner_when_multiple_owners(self):\n # Make an extra owner for the project\n collaborator = self.project.collaborators.create(\n user = get_user_model().objects.create_user('owner2'),\n role = Collaborator.Role.OWNER\n )\n # Authenticate the client as an owner of the project\n self.authenticateAsProjectOwner(collaborator.project)\n # Try to delete them\n self.assertDeleteResponseIsEmpty(\"/collaborators/{}/\".format(collaborator.pk))\n # Test that the collaborator was removed\n self.assertFalse(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "def test_cannot_remove_sole_owner(self):\n # Get the current sole owner for the project\n collaborator = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first()\n # Authenticate as the owner from the discovered collaborator\n self.client.force_authenticate(user = collaborator.user)\n # Trying to delete the owner record should result in a conflict\n response_data = self.assertConflict(\n \"/collaborators/{}/\".format(collaborator.pk),\n \"DELETE\"\n )\n # Check that the error code is what we expected\n self.assertEqual(response_data['code'], 'sole_owner')\n # Test that the collaborator was not removed\n self.assertTrue(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of 
the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def is_owner(ctx):\n return ctx.author.id in owners", "def my_owner():\n async def predicate(ctx):\n return ctx.guild is not None and ctx.me.guild_permissions.read_messages \\\n and ctx.author.id == 688917796943560734\n return commands.check(predicate)", "def user_is_channel_owner(channel_id: int, auth_user_id: int) -> bool: \n channel = get_channel(channel_id)\n owners = channel.get('owner_members')\n if not owners:\n # don't iterate over NoneType\n return False\n for owner in owners:\n if owner.get('u_id') == auth_user_id:\n return True\n return False", "def unorphaned(self):\n return self.new_owner == self.user", "def test_messages_remove_owner():\n owner_info = auth_register(\"337992611@gamil.com\", \"ccc337992611\", \"Min\", \"Li\")\n user_info = auth_register(\"392611@gamil.com\", \"ccc337992611\", \"Min\", \"Li\")\n\n channel_info = channels_create(user_info['token'], 'test_one', True)\n channel_join(owner_info['token'], channel_info['channel_id'])\n\n msg_info = message_send_helper(user_info['token'], channel_info['channel_id'], \"Test msg!\")\n message_remove(owner_info['token'], msg_info['msg_dict']['message_id'])\n\n search_data = search(owner_info['token'], \"Test msg!\")\n\n assert search_data['messages'] == []", "def remove_token_from_owners_list(ctx, t_owner, t_id):\n ckey = concat(t_owner, t_id)\n if Get(ctx, ckey):\n Delete(ctx, ckey)\n return True\n\n print(\"token not found in owner's list\")\n return False", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"invalidemail@gmail.com\")", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if the owner of the flockr who is not the channel owner can remove an owner
def test_channel_removeowner_owner_flockr(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n 
channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "def channel_removeowner(token, channel_id, u_id):\n # Convert channel_id and u_id to integer\n channel_id = int(channel_id)\n u_id = int(u_id)\n\n # Validate token, channel and user\n if not validator.is_valid_token(token):\n raise AccessError(INVALID_TOKEN)\n if not validator.is_valid_channel(channel_id):\n raise ValueError(INVALID_CHANNEL)\n if not validator.is_valid_user(u_id):\n raise ValueError(INVALID_USER)\n\n # Locate channel and user in database\n channel = database.get_channel_by_id(channel_id)\n auth_user_id = jwt_handler.decode_token(token)\n auth_user = database.get_user_by_id(auth_user_id)\n owner_list = channel[\"owner_ids\"]\n\n if auth_user[\"permission\"] not in [OWNER, ADMIN] or\\\n auth_user_id not in channel[\"owner_ids\"]:\n raise AccessError(CHANNEL_DEL_OWNER_NO_AUTH)\n\n # Check that the added_user is currently an owner\n if u_id not in owner_list:\n raise ValueError(CHANNEL_NOT_OWNER)\n\n # Remove user from owner_ids\n owner_list.remove(u_id)\n database.update_channel_by_id(channel_id, {\n \"owner_ids\": owner_list\n })\n\n return {}", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_can_remove_owner_when_multiple_owners(self):\n # Make an extra owner for the project\n collaborator = self.project.collaborators.create(\n user = get_user_model().objects.create_user('owner2'),\n role = Collaborator.Role.OWNER\n )\n # Authenticate the client as an owner of the project\n self.authenticateAsProjectOwner(collaborator.project)\n # Try to delete them\n self.assertDeleteResponseIsEmpty(\"/collaborators/{}/\".format(collaborator.pk))\n # Test that the collaborator was removed\n self.assertFalse(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def is_owner(ctx):\n return ctx.author.id in owners", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"invalidemail@gmail.com\")", "def 
remove_token_from_owners_list(ctx, t_owner, t_id):\n ckey = concat(t_owner, t_id)\n if Get(ctx, ckey):\n Delete(ctx, ckey)\n return True\n\n print(\"token not found in owner's list\")\n return False", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_cannot_remove_sole_owner(self):\n # Get the current sole owner for the project\n collaborator = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first()\n # Authenticate as the owner from the discovered collaborator\n self.client.force_authenticate(user = collaborator.user)\n # Trying to delete the owner record should result in a conflict\n response_data = self.assertConflict(\n \"/collaborators/{}/\".format(collaborator.pk),\n \"DELETE\"\n )\n # Check that the error code is what we expected\n self.assertEqual(response_data['code'], 'sole_owner')\n # Test that the collaborator was not removed\n self.assertTrue(Collaborator.objects.filter(pk = collaborator.pk).exists())", "def my_owner():\n async def predicate(ctx):\n return ctx.guild is not None and ctx.me.guild_permissions.read_messages \\\n and ctx.author.id == 688917796943560734\n return commands.check(predicate)", "def test_messages_remove_owner():\n owner_info = auth_register(\"337992611@gamil.com\", \"ccc337992611\", \"Min\", \"Li\")\n user_info = auth_register(\"392611@gamil.com\", \"ccc337992611\", \"Min\", \"Li\")\n\n channel_info = channels_create(user_info['token'], 'test_one', True)\n channel_join(owner_info['token'], channel_info['channel_id'])\n\n msg_info = message_send_helper(user_info['token'], channel_info['channel_id'], \"Test msg!\")\n message_remove(owner_info['token'], msg_info['msg_dict']['message_id'])\n\n search_data = search(owner_info['token'], \"Test msg!\")\n\n assert search_data['messages'] == []", "def user_is_channel_owner(channel_id: int, auth_user_id: int) -> bool: \n channel = get_channel(channel_id)\n owners = channel.get('owner_members')\n if not owners:\n # don't iterate over NoneType\n return False\n for owner in owners:\n if owner.get('u_id') == auth_user_id:\n return True\n return False", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_removeowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) with pytest.raises(AccessError): assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_lock_not_owned(self):\n response = self.app.get('/check/321', headers=self.auth_header(\"test@mail.com\", \"python\"))\n self.assertEqual(403, response.status_code)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_util_has_perm_or_owns_sanity(self):\n from kitsune.forums.tests import ThreadFactory\n me = UserFactory()\n my_t = ThreadFactory(creator=me)\n other_t = ThreadFactory()\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, my_t.forum)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, other_t.forum)\n eq_(allowed, False)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def my_owner():\n async def predicate(ctx):\n return ctx.guild is not None and ctx.me.guild_permissions.read_messages \\\n and ctx.author.id == 688917796943560734\n return commands.check(predicate)", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last')\n user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "async def interaction_check(self, interaction: discord.Interaction):\n owner_id = interaction.guild.owner.id\n if interaction.user.id not in (\n owner_id,\n *interaction.client.owner_ids,\n ):\n await interaction.response.send_message(\n content=_(\"You are not authorized to interact with this.\"), ephemeral=True\n )\n return False\n return True", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def _check_access(self):\r\n return self._is_access_free", "def test_pin_msg_not_owner():\n data = helper_create_react()\n # Create second user and join the channel with the 
message.\n user_infor = auth_register('337992611@qq.com', 'HereyouAreMyP', 'not', 'exit')\n channel_join(user_infor['token'], data['channel_id'])\n with pytest.raises(AccessError):\n message_pin(user_infor['token'], data['message_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "def test_edit_msg_no_access():\n data = create_pinned_message()\n user2 = auth_register(\"ema222@email.com\", \"password\", \"Bill\", \"Bill\")\n channel_join(user2['token'], data['channel_id'])\n with pytest.raises(AccessError):\n message_edit(user2['token'], data['message_id'], \"test\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets a system Hamiltonian to the Hubbard Hamiltonian. Does exactly this. If the system Hamiltonian has some other terms on it, they are not touched. So be sure to use this function only in newly created `System` objects.
def set_hamiltonian(self, system): system.clear_hamiltonian() if 'bh' in system.left_block.operators.keys(): system.add_to_hamiltonian(left_block_op='bh') if 'bh' in system.right_block.operators.keys(): system.add_to_hamiltonian(right_block_op='bh') system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U)) # system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U) system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
[ "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)", "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)", "def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)", "def set_block_hamiltonian(self, tmp_matrix_for_bh, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U))\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. 
- self.U))\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U)\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)", "def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh", "def update_zeeman_ham(self, B, B_theta, B_phi):\r\n\r\n # Convert B into cartesian\r\n B_vec = B * np.array([ np.sin(B_theta) * np.cos(B_phi),\r\n np.sin(B_theta) * np.sin(B_phi), \r\n np.cos(B_theta)])\r\n\r\n # Compute the electronic Zeeman Hamiltonian using beta * B @ g @ S\r\n HZ_el_gs = (self.beta_el * B_vec.dot(self.g_gs).dot(self.Sv3)).reshape((self.multS, self.multS))\r\n HZ_el_es = (self.beta_el * B_vec.dot(self.g_es).dot(self.Sv3)).reshape((self.multS, self.multS))\r\n # Expand into the nuclear spin space\r\n HZ_el_gs = np.kron(HZ_el_gs, np.identity(self.multI))\r\n HZ_el_es = np.kron(HZ_el_es, np.identity(self.multI))\r\n # Then expand into the superhyperfine nuclear spin space\r\n for neigh in self.neighbours: \r\n HZ_el_gs = np.kron(HZ_el_gs, np.identity(neigh.multH))\r\n HZ_el_es = np.kron(HZ_el_es, np.identity(neigh.multH))\r\n\r\n # Do the same for the REI nuclear Zeeman Hamiltonian but there is no \r\n # distinction between GS and ES. 
We assume the nuclear g factor is an \r\n # isotropic scalar.\r\n HZ_n_rei = (self.beta_n * self.g_n_rei * B_vec.dot(self.Iv3)).reshape((self.multI, self.multI))\r\n # Expand into the electronic spin space\r\n HZ_n_rei = np.kron(np.identity(self.multS), HZ_n_rei)\r\n # Expand into the superhyperfine nuclear spin space\r\n for neigh in self.neighbours: \r\n HZ_n_rei = np.kron(HZ_n_rei, np.identity(neigh.multH))\r\n\r\n # Do the same for the nuclear Zeeman Hamiltonian for the host nuclear spin\r\n HZ_n_host = 0\r\n for neigh_idx, neigh in enumerate(self.neighbours):\r\n # Start with a blank template for the S and I spaces\r\n HZ_n_host_accum = np.kron(np.identity(self.multS), np.identity(self.multI))\r\n HZ_n_host_term = (self.beta_n * neigh.g_n_host * B_vec.dot(neigh.Hv3)).reshape((neigh.multH, neigh.multH))\r\n\r\n # Iterate through the neighbours and expand into their spaces\r\n # When it's its own index we cross the actual Zeeman interaction instead\r\n for inner_neigh_idx, inner_neigh in enumerate(self.neighbours):\r\n if inner_neigh_idx == neigh_idx:\r\n HZ_n_host_accum = np.kron(HZ_n_host_accum, HZ_n_host_term)\r\n else:\r\n HZ_n_host_accum = np.kron(HZ_n_host_accum, np.identity(inner_neigh.multH))\r\n\r\n HZ_n_host += HZ_n_host_accum\r\n\r\n # Reset the Hamiltonian to be only the field-indep HF part and SHF part\r\n self.reset_ham()\r\n # Add in the just-computed Zeeman terms\r\n self.H_gs += (HZ_el_gs - HZ_n_rei - HZ_n_host)\r\n self.H_es += (HZ_el_es - HZ_n_rei - HZ_n_host)", "def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham", "def set_hbond(self) -> None:\n ...", "def generateHamiltonian(self):\n\n nprint(\"\\n Generating Hamiltonian matrix ...\")\n\n # For uniform RGF blocks\n if (self.DevType == self.COH_RGF_UNI): \n # no k-loop, real space hamiltonian only\n if self.kp.N == 0:\n \n lyr0 = self.geom.span(0, self.nw*self.nh-1) # extract block # 0\n lyr1 = self.geom.span(self.nw*self.nh, 2*self.nw*self.nh-1) # extract block # 1\n \n self.H0, self.S0 = generateHamOvl(self.hp, lyr0, lyr0)\n\n self.Hl, S = generateHamOvl(self.hp, lyr1, lyr0)\n\n# np.set_printoptions(linewidth=200)\n# print \"\\nS0\\n\"\n# print self.S0\n# print \"\\nH0\\n\"\n# print self.H0\n# print \"\\nHl\\n\"\n# print self.Hl\n# self.geom.exportGjf('dbg_geom.gjf')\n# lyr0.exportGjf('lyr0.gjf')\n# lyr1.exportGjf('lyr1.gjf')\n# lyr01 = lyr0+lyr1;\n# lyr01.exportGjf('lyr01.gjf')\n \n # nearest neighbors in transverse direction for k-loop\n else:\n self.H0 = []\n self.S0 = []\n self.Hl = []\n self.pv = []\n self.pvl = []\n \n lv = self.geom.LatticeVector\n self.pv.append(lv*LCoord(0,0,0))\n self.pvl.append(lv*LCoord(0,0,0))\n lyr0 = self.geom.span(0, self.nw*self.nh-1) # extract block # 0\n lyr1 = self.geom.span(self.nw*self.nh, 2*self.nw*self.nh-1) # extract block # 1\n \n H, S = generateHamOvl(self.hp, lyr0, lyr0)\n self.H0.append(H); self.S0.append(S)\n H, S = generateHamOvl(self.hp, lyr1, lyr0)\n self.Hl.append(H)\n \n self.pv.append(lv*LCoord(0,1,0))\n lyr0top = lyr0 + self.pv[1] # top neighbor of layer 0\n H, S = generateHamOvl(self.hp, lyr0, lyr0top)\n self.H0.append(H)\n \n self.pv.append(lv*LCoord(0, -1, 0))\n lyr0bot = lyr0 + self.pv[2] # bottom neighbor of layer 0\n H, S = generateHamOvl(self.hp, lyr0, lyr0bot)\n 
self.H0.append(H)\n \n self.pvl.append(lv*LCoord(-1,1,0))\n H, S = generateHamOvl(self.hp, lyr1, lyr0top)\n self.Hl.append(H)\n \n self.pvl.append(lv*LCoord(-1,-1,0))\n H, S = generateHamOvl(self.hp, lyr1, lyr0bot) \n self.Hl.append(H)\n # Non-uniform RGF blocks \n elif (self.DevType == self.COH_RGF_NON_UNI):\n self.H0 = []\n self.S0 = []\n self.Hl = []\n beg = 0\n for ib in range(0, self.nb): # setup the block hamiltonian\n end = beg + self.nbw[ib] - 1\n \n # generate H_i,i and S_i,i\n lyri = self.geom.span(beg, end) # extract block # i\n H0,S0 = generateHamOvl(self.hp, lyri, lyri)\n self.H0.append(H0)\n self.S0.append(S0)\n\n # generate H_i,i-1\n if ib > 0:\n Hl,Sl = generateHamOvl(self.hp, lyri, lyrim1)\n self.Hl.append(Hl)\n \n lyrim1 = lyri\n beg = end + 1\n # Coupling matrix between two blocks of left contact\n Hl,Sl = generateHamOvl(self.hp, self.lyr_0, self.lyr_0m1)\n self.Hl.insert(0, Hl)\n # Coupling matrix between two blocks of right contact\n Hl,Sl = generateHamOvl(self.hp, self.lyr_nb, self.lyr_nbm1)\n self.Hl.append(Hl)\n\n nprint(\" done.\")", "def set_operators_to_update_to_AF_Heisenberg(system):\n system.add_to_operators_to_update('s_z', site_op='s_z')\n system.add_to_operators_to_update('s_p', site_op='s_p')\n system.add_to_operators_to_update('s_m', site_op='s_m')", "def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)", "def get_bare_hamiltonian(self):\n warnings.warn('bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)\n return self.bare_hamiltonian()", "def _Hom_(self, B, category):\n cat = MagmaticAlgebras(self.base_ring()).FiniteDimensional().WithBasis()\n if category.is_subcategory(cat):\n from sage.algebras.finite_dimensional_algebras.finite_dimensional_algebra_morphism import FiniteDimensionalAlgebraHomset\n return FiniteDimensionalAlgebraHomset(self, B, category=category)\n return super(FiniteDimensionalAlgebra, self)._Hom_(B, category)", "def display_hamiltonian(H):\n terms = split_hamiltonian(H)\n\n def label(s):\n if s == 'H0':\n return r'\\hat{H}_0'\n elif s == 'Hint':\n return r'\\hat{H}_{\\text{int}}'\n else:\n try:\n prefix, ind = s.split('_')\n except ValueError:\n print(s)\n raise\n return r'\\hat{H}_{\\Omega_%s}' % ind\n\n lines = []\n lines.append(r'\\begin{align}')\n lines.append(r' \\hat{H} &= %s\\\\' % \" + \".join([label(name) for name in terms.keys()]))\n for name, H in terms.items():\n lines.append(r' %s &= %s\\\\' % (label(name), tex(H)))\n lines.append(r'\\end{align}')\n display(Latex(\"\\n\".join(lines)))", "def _ctrl_hum_set(self, osrs_h):\n data = osrs_h & 0x7\n self._bus.write_byte_data(self.addr, self.CTRL_HUM,\n data)", "def set_hmf(self, set_mf):\n self.mf = set_mf", "def add_Hs(self):\n self.log.debug('Adding hydrogens')\n self.mol = AllChem.AddHs(self.mol)\n changed = []\n Chem.SanitizeMol(self.mol)\n for atom in self.mol.GetAtoms():\n if atom.GetSymbol() == '*':\n atom.SetAtomicNum(6)\n atom.SetHybridization(Chem.HybridizationType.SP3)\n changed.append(atom.GetIdx())\n AllChem.EmbedMolecule(self.mol, useRandomCoords=True)\n AllChem.MMFFOptimizeMolecule(self.mol)\n AllChem.ComputeGasteigerCharges(self.mol, throwOnParamFailure=False)\n for i, atom in enumerate(self.mol.GetAtoms()):\n if i in changed:\n atom.SetAtomicNum(0)\n self.fix_mol()", "def 
createQHamiltonian(self, frame: str = \"rot\") -> QHamiltonian:\n\n # Initialize the hamiltonian\n\n ham = QHamiltonian(subSysNum=self.subSysNum, sysLevel=self.sysLevel, dt=self.dt)\n\n # Create the system Hamiltonian in the lab frame\n\n if frame == \"lab\":\n\n self._generateDrift(ham)\n if self.couplingMap is not None:\n self._generateTimeIndepCoupTerm(ham)\n self._setDecoherence(ham)\n\n # Create the system Hamiltonian in the rotating frame of qubit frequency\n\n elif frame == \"rot\":\n\n self._generateDrift(ham)\n self._setDecoherence(ham)\n\n if self.couplingMap is not None:\n self._generateCoupTerm(ham)\n\n return ham", "def tscheme_setheading(h):\n _check_nums(h)\n _tscheme_prep()\n turtle.setheading(h)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the block Hamiltonian to the Hubbard model block Hamiltonian.
def set_block_hamiltonian(self, tmp_matrix_for_bh, system): # If you have a block hamiltonian in your block, add it if 'bh' in system.growing_block.operators.keys(): system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id') system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U)) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U)) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
[ "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)", "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)", "def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh", "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))\n \n# system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)\n\n system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. 
+ self.U)/2.)\n system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)\n \n system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)\n\n system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)", "def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)", "def set_hbond(self) -> None:\n ...", "def update_zeeman_ham(self, B, B_theta, B_phi):\r\n\r\n # Convert B into cartesian\r\n B_vec = B * np.array([ np.sin(B_theta) * np.cos(B_phi),\r\n np.sin(B_theta) * np.sin(B_phi), \r\n np.cos(B_theta)])\r\n\r\n # Compute the electronic Zeeman Hamiltonian using beta * B @ g @ S\r\n HZ_el_gs = (self.beta_el * B_vec.dot(self.g_gs).dot(self.Sv3)).reshape((self.multS, self.multS))\r\n HZ_el_es = (self.beta_el * B_vec.dot(self.g_es).dot(self.Sv3)).reshape((self.multS, self.multS))\r\n # Expand into the nuclear spin space\r\n HZ_el_gs = np.kron(HZ_el_gs, np.identity(self.multI))\r\n HZ_el_es = np.kron(HZ_el_es, np.identity(self.multI))\r\n # Then expand into the superhyperfine nuclear spin space\r\n for neigh in self.neighbours: \r\n HZ_el_gs = np.kron(HZ_el_gs, np.identity(neigh.multH))\r\n HZ_el_es = np.kron(HZ_el_es, np.identity(neigh.multH))\r\n\r\n # Do the same for the REI nuclear Zeeman Hamiltonian but there is no \r\n # distinction between GS and ES. 
We assume the nuclear g factor is an \r\n # isotropic scalar.\r\n HZ_n_rei = (self.beta_n * self.g_n_rei * B_vec.dot(self.Iv3)).reshape((self.multI, self.multI))\r\n # Expand into the electronic spin space\r\n HZ_n_rei = np.kron(np.identity(self.multS), HZ_n_rei)\r\n # Expand into the superhyperfine nuclear spin space\r\n for neigh in self.neighbours: \r\n HZ_n_rei = np.kron(HZ_n_rei, np.identity(neigh.multH))\r\n\r\n # Do the same for the nuclear Zeeman Hamiltonian for the host nuclear spin\r\n HZ_n_host = 0\r\n for neigh_idx, neigh in enumerate(self.neighbours):\r\n # Start with a blank template for the S and I spaces\r\n HZ_n_host_accum = np.kron(np.identity(self.multS), np.identity(self.multI))\r\n HZ_n_host_term = (self.beta_n * neigh.g_n_host * B_vec.dot(neigh.Hv3)).reshape((neigh.multH, neigh.multH))\r\n\r\n # Iterate through the neighbours and expand into their spaces\r\n # When it's its own index we cross the actual Zeeman interaction instead\r\n for inner_neigh_idx, inner_neigh in enumerate(self.neighbours):\r\n if inner_neigh_idx == neigh_idx:\r\n HZ_n_host_accum = np.kron(HZ_n_host_accum, HZ_n_host_term)\r\n else:\r\n HZ_n_host_accum = np.kron(HZ_n_host_accum, np.identity(inner_neigh.multH))\r\n\r\n HZ_n_host += HZ_n_host_accum\r\n\r\n # Reset the Hamiltonian to be only the field-indep HF part and SHF part\r\n self.reset_ham()\r\n # Add in the just-computed Zeeman terms\r\n self.H_gs += (HZ_el_gs - HZ_n_rei - HZ_n_host)\r\n self.H_es += (HZ_el_es - HZ_n_rei - HZ_n_host)", "def block_hash(self, block_hash):\n\n self._block_hash = block_hash", "def generateHamiltonian(self):\n\n nprint(\"\\n Generating Hamiltonian matrix ...\")\n\n # For uniform RGF blocks\n if (self.DevType == self.COH_RGF_UNI): \n # no k-loop, real space hamiltonian only\n if self.kp.N == 0:\n \n lyr0 = self.geom.span(0, self.nw*self.nh-1) # extract block # 0\n lyr1 = self.geom.span(self.nw*self.nh, 2*self.nw*self.nh-1) # extract block # 1\n \n self.H0, self.S0 = generateHamOvl(self.hp, lyr0, lyr0)\n\n self.Hl, S = generateHamOvl(self.hp, lyr1, lyr0)\n\n# np.set_printoptions(linewidth=200)\n# print \"\\nS0\\n\"\n# print self.S0\n# print \"\\nH0\\n\"\n# print self.H0\n# print \"\\nHl\\n\"\n# print self.Hl\n# self.geom.exportGjf('dbg_geom.gjf')\n# lyr0.exportGjf('lyr0.gjf')\n# lyr1.exportGjf('lyr1.gjf')\n# lyr01 = lyr0+lyr1;\n# lyr01.exportGjf('lyr01.gjf')\n \n # nearest neighbors in transverse direction for k-loop\n else:\n self.H0 = []\n self.S0 = []\n self.Hl = []\n self.pv = []\n self.pvl = []\n \n lv = self.geom.LatticeVector\n self.pv.append(lv*LCoord(0,0,0))\n self.pvl.append(lv*LCoord(0,0,0))\n lyr0 = self.geom.span(0, self.nw*self.nh-1) # extract block # 0\n lyr1 = self.geom.span(self.nw*self.nh, 2*self.nw*self.nh-1) # extract block # 1\n \n H, S = generateHamOvl(self.hp, lyr0, lyr0)\n self.H0.append(H); self.S0.append(S)\n H, S = generateHamOvl(self.hp, lyr1, lyr0)\n self.Hl.append(H)\n \n self.pv.append(lv*LCoord(0,1,0))\n lyr0top = lyr0 + self.pv[1] # top neighbor of layer 0\n H, S = generateHamOvl(self.hp, lyr0, lyr0top)\n self.H0.append(H)\n \n self.pv.append(lv*LCoord(0, -1, 0))\n lyr0bot = lyr0 + self.pv[2] # bottom neighbor of layer 0\n H, S = generateHamOvl(self.hp, lyr0, lyr0bot)\n self.H0.append(H)\n \n self.pvl.append(lv*LCoord(-1,1,0))\n H, S = generateHamOvl(self.hp, lyr1, lyr0top)\n self.Hl.append(H)\n \n self.pvl.append(lv*LCoord(-1,-1,0))\n H, S = generateHamOvl(self.hp, lyr1, lyr0bot) \n self.Hl.append(H)\n # Non-uniform RGF blocks \n elif (self.DevType == self.COH_RGF_NON_UNI):\n 
self.H0 = []\n self.S0 = []\n self.Hl = []\n beg = 0\n for ib in range(0, self.nb): # setup the block hamiltonian\n end = beg + self.nbw[ib] - 1\n \n # generate H_i,i and S_i,i\n lyri = self.geom.span(beg, end) # extract block # i\n H0,S0 = generateHamOvl(self.hp, lyri, lyri)\n self.H0.append(H0)\n self.S0.append(S0)\n\n # generate H_i,i-1\n if ib > 0:\n Hl,Sl = generateHamOvl(self.hp, lyri, lyrim1)\n self.Hl.append(Hl)\n \n lyrim1 = lyri\n beg = end + 1\n # Coupling matrix between two blocks of left contact\n Hl,Sl = generateHamOvl(self.hp, self.lyr_0, self.lyr_0m1)\n self.Hl.insert(0, Hl)\n # Coupling matrix between two blocks of right contact\n Hl,Sl = generateHamOvl(self.hp, self.lyr_nb, self.lyr_nbm1)\n self.Hl.append(Hl)\n\n nprint(\" done.\")", "def set_hbond(self) -> None:\n for h in self.hedra.values():\n if h.e_class == \"HNCA\":\n h.hbond_1 = True\n elif h.e_class == \"CACO\":\n h.hbond_2 = True", "def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham", "def set_mass_flow(self):\n self.exh.mdot_exp = self.exh.flow_array * self.exh.rho_array\n self.exh.C = self.exh.mdot_exp * self.exh.c_p_air\n self.exh.enthalpy_flow = self.exh.C * self.exh.T_inlet_array", "def self_harm(self, self_harm):\n self.__self_harm = test_input(self_harm)", "def hitter_bat_hand(self, hitter_bat_hand):\n\n self._hitter_bat_hand = hitter_bat_hand", "def initialize_h(self):\n\n\t\tself.H = np.random.random((self._rank, self._samples))", "def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def set_hv_bias(self, hv_bias):\n self.logger.debug(\"Setting HV bias to %s\", hv_bias)\n if hv_bias < 0 or hv_bias > 120:\n raise ValueError(\"HV bias must be between 0 and 120 volts, got \"\n \"{value}\".format(value=hv_bias))\n chips = range(8)\n command = self._construct_command(chips, self.HV_BIAS, str(hv_bias))\n success = self._send_command(command)\n if success:\n self.hv_bias = hv_bias", "def set_hsb(self, hue, saturation, brightness=None, *, index=0,\n transition_time=None):\n self._value_validate(hue, RANGE_HUE, \"Hue\")\n self._value_validate(saturation, RANGE_SATURATION, \"Saturation\")\n\n values = {\n ATTR_LIGHT_COLOR_SATURATION: saturation,\n ATTR_LIGHT_COLOR_HUE: hue\n }\n\n if brightness is not None:\n values[ATTR_LIGHT_DIMMER] = brightness\n self._value_validate(brightness, RANGE_BRIGHTNESS, \"Brightness\")\n\n if transition_time is not None:\n values[ATTR_TRANSITION_TIME] = transition_time\n\n return self.set_values(values, index=index)", "def set_hmf(self, set_mf):\n self.mf = set_mf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ExponentialFamily class initialization.
def test_exponential_family_init(): D = 4 N = 100 exp_fam = ExponentialFamily(D) assert exp_fam.D == D assert exp_fam.support_layer is None assert exp_fam.D_eta == D with raises(TypeError): exp_fam = ExponentialFamily('foo') with raises(ValueError): exp_fam = ExponentialFamily(0) with raises(TypeError): exp_fam = ExponentialFamily(4, int) with raises(NotImplementedError): exp_fam.sample_eta(N) mu = np.zeros((D,)) with raises(NotImplementedError): exp_fam.mu_to_eta(mu) eta = np.zeros((D,)) with raises(NotImplementedError): exp_fam.eta_to_mu(eta) z = np.zeros((D,)) with raises(NotImplementedError): exp_fam.T(z) return None
[ "def test_FGDA_init():\n FGDA(metric='riemann')", "def test_FgMDM_init():\n mdm = FgMDM(metric='riemann')", "def test_init(self):\n # Test simple initialization\n sm = self._mock_supermarket_instance()\n self.assertIs(type(sm),supermarket_register.SupermarketRegister)\n\n # Test invalid product code initialization\n product_codes = {\n 'XXXX-XXXX-XXXX-XXX*': {\n 'product': 'a',\n 'price': 1\n }\n }\n with self.assertRaises(ValueError):\n supermarket_register.SupermarketRegister(product_codes)\n\n # Test invalid price initialization\n product_codes = {\n 'XXXX-XXXX-XXXX-XXXX': {\n 'product': 'a',\n 'price': -1\n }\n }\n with self.assertRaises(ValueError):\n supermarket_register.SupermarketRegister(product_codes)", "def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected", "def test_frost_number_has_initialize():\n fn = bmi_frost_number.BmiFrostnumberMethod()\n fn.initialize(cfg_file=onesite_oneyear_filename)", "def test_epsf_build_invalid_fitter(self):\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)", "def test_epsf_build_invalid_fitter(self):\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)", "def test_init(self):\n pass", "def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})", "def test_sphere_init():\n Sphere(5)", "def test_constructor(self):\n pass", "def test_init_error_handling(self):\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2)\n assert err.value.message == \"k=2 should be in range [16, 65536]\"\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2**17)\n assert err.value.message == \"k=131072 should be in range [16, 65536]\"\n hll = HyperLogLog(2**16)\n assert hll.k == 2**16\n hll = HyperLogLog64(2**17)\n assert hll.k == 2**17", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")", "def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)", "def __init__(\r\n self,\r\n centre=0.0, # <- PyAutoFit recognises these constructor arguments are the model\r\n intensity=0.1, # <- parameters of the Exponential.\r\n rate=0.01,\r\n ):\r\n super(Exponential, self).__init__(centre=centre, intensity=intensity)\r\n\r\n self.rate = rate", "def setUp(self):\n first = 'jeff'\n last = 'bezos'\n self.salary = 100000\n self.employee = Employee(first, last, self.salary)", "def test_hmf_init(self):\n spec = np.random.random((20, 100))\n invvar = np.random.random((20, 100))\n hmf = HMF(spec, invvar)\n assert hmf.K == 4\n assert log.level == 20 # INFO\n hmf = HMF(spec, invvar, K=6, verbose=True)\n assert hmf.K == 6\n assert log.level == 10 # DEBUG", "def test_init_female(self):\r\n female = trk_female.Female(self.arena)\r\n 
self.assertEqual(female.settings_valid(), False)", "def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the printable length of the Entry's Text
def getTextLength(self): return 0
[ "def length(self, txt):\n return self.fontMetrics().width(self.__surrounding.format(txt))", "def __len__(self):\n return len(self.text)", "def width(self, text):\n return len(text) * (self.font_width + 1)", "def get_length(self) -> int:\n return sum([text.get_length() for text in self._texts])", "def getLength(self):\n return len(self.entries)", "def node_text_length(node):\n return len(' '.join(node.text_content().split()))", "def LEN(text):\n return len(text)", "def printable_length_where(w):\n if sys.version_info[0] >= 3: # pragma: no cover\n stype = str\n else:\n stype = unicode\n sub = w.string[w.character:w.character_end]\n # return len(stype(sub, 'utf-8'))\n # I am not really sure this is what we want\n return len(stype(sub))", "def get_entry_length(self, ii):\n return self.db.root.Protein.Entries[ii - 1]['SeqBufferLength'] - 1", "def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings", "def TextCalcWidth(Text) -> int:\n pass", "def text_length(self, text, fontsize=11, wmode=0):\n return fontsize * sum([self.glyph_advance(ord(c), wmode=wmode) for c in text])", "def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength", "def compute_user_description_text_length(row):\n row[\"user_description_text_length\"] = len(row['user_description'])\n return row[\"user_description_text_length\"]", "def get_string_length(self):\n return int(self.read('H')[0])", "def size(self):\n return len(self.chars)", "def visual_len(text) -> int:\n return len(text) if NO_COLOR else len(_remove_regex(\"\\033\\\\[[0-9]*m\", text))", "def printed_length(string):\n # It returns the length of the printed string\n return len(remove_colors(string))", "def _get_text_width(self, text: str) -> int:\n return self._buffer_draw.textsize(text, font=self.FONT)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to generate jitted lanczos function used in JaxBackend.eigsh_lanczos. The function `jax_lanczos` returned by this higher-order function has the following
def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable: @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6)) def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho): """ Jitted lanczos routine. Args: matvec: A callable implementing the matrix-vector product of a linear operator. arguments: Arguments to `matvec` additional to an input vector. `matvec` will be called as `matvec(init, *args)`. init: An initial input state to `matvec`. ncv: Number of krylov iterations (i.e. dimension of the Krylov space). neig: Number of eigenvalue-eigenvector pairs to be computed. landelta: Convergence parameter: if the norm of the current Lanczos vector falls below `landelta`, iteration is stopped. reortho: If `True`, reorthogonalize all krylov vectors at each step. This should be used if `neig>1`. Returns: jax.numpy.ndarray: Eigenvalues list: Eigenvectors """ def body_modified_gram_schmidt(i, vals): vector, krylov_vectors = vals v = krylov_vectors[i, :] vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors] def body_lanczos(vals): current_vector, krylov_vectors, vector_norms = vals[0:3] diagonal_elements, matvec, args, _ = vals[3:7] threshold, i, maxiteration = vals[7:] norm = jax.numpy.linalg.norm(current_vector) normalized_vector = current_vector / norm normalized_vector, krylov_vectors = jax.lax.cond( reortho, True, lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt, [normalized_vector, krylov_vectors]), False, lambda x: [normalized_vector, krylov_vectors]) Av = matvec(normalized_vector, *args) diag_element = jax.numpy.vdot(normalized_vector, Av) res = jax.numpy.reshape( jax.numpy.ravel(Av) - jax.numpy.ravel(normalized_vector) * diag_element - krylov_vectors[i - 1] * norm, Av.shape) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :], jax.numpy.ravel(normalized_vector)) vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1], norm) diagonal_elements = jax.ops.index_update(diagonal_elements, jax.ops.index[i - 1], diag_element) return [ res, krylov_vectors, vector_norms, diagonal_elements, matvec, args, norm, threshold, i + 1, maxiteration ] def cond_fun(vals): _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals def check_thresh(check_vals): val, thresh = check_vals return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x) return jax.lax.cond(iteration <= maxiteration, [norm, threshold], check_thresh, False, lambda x: x) numel = jax.numpy.prod(init.shape) krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype) norms = jax.numpy.zeros(ncv, dtype=init.dtype) diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype) norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0) norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0), dtype=init.dtype)).dtype initvals = [ init, krylov_vecs, norms, diag_elems, matvec, arguments, norms_dtype.type(1.0), landelta, 1, ncv ] output = jax.lax.while_loop(cond_fun, body_lanczos, initvals) final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :], jax.numpy.ravel(final_state)) A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag( norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1) eigvals, U = jax.numpy.linalg.eigh(A_tridiag) eigvals = eigvals.astype(A_tridiag.dtype) def body_vector(i, vals): krv, unitary, states = vals dim = unitary.shape[1] n, m = jax.numpy.divmod(i, dim) states = jax.ops.index_add(states, jax.ops.index[n, :], 
krv[m + 1, :] * unitary[m, n]) return [krv, unitary, states] state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype) _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1), body_vector, [krylov_vecs, U, state_vectors]) return jax.numpy.array(eigvals[0:neig]), [ jax.numpy.reshape(vectors[n, :], init.shape) / jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig) ] return jax_lanczos
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def my_jacfwd_novmap(f):\n def jacfun(x, **params):\n # create little function that grabs tangents (second arg returned, hence [1])\n _jvp = lambda s: jax.jvp(f, (x,), (s,))[1]\n # evaluate tangents on standard basis. 
Note we are only mapping over tangents arg of jvp\n #Jt = jax.vmap(_jvp, in_axes=1)(np.eye(len(x)))\n Jt = np.asarray([_jvp(i) for i in np.eye(len(x))])\n #print(Jt.shape)\n #return np.transpose(Jt)\n return np.moveaxis(Jt, 0, -1)\n return jacfun", "def zonal( self, fields, fun ):\n raise NotImplementedError(\"zonal\")", "def _vzlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._zlerchphi(z_, a) for z_ in z])", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def lanczos_bidiagonalization(A, p0, k):\n \n dtype = p0.dtype\n\n betas = np.zeros(k+1)\n alphas = np.zeros(k)\n m, n = A.dims()\n U = np.zeros((m, k+1), dtype=dtype)\n V = np.zeros((n, k+1), dtype=dtype)\n\n # init\n betas[0] = norm(p0)\n U[:, 0] = p0/betas[0]\n V[:, 0] = 0 \n for i in range(k):\n r = A.compute_ATx(U[:, i]) \n if i > 0: \n r -= np.dot(betas[0], V[:, i]) \n alphas[i] = norm(r)\n V[:, i+1] = r/alphas[i]\n p = A.compute_Ax(V[:, i+1]) - alphas[i]*U[:, i]\n betas[i+1] = norm(p)\n U[:, i+1] = p / betas[i+1]\n\n B = np.zeros((k+1, k), dtype = dtype)\n\n for i in range(k):\n B[i, i] = alphas[i]\n B[i+1, i] = betas[i+1]\n\n return B, U, V[:, 1:]", "def zenazi(scx_l, scx_b, scy_l, scy_b, scz_l, scz_b, src_l, src_b):\n # Zenith is the distance from the optical axis (here z)\n costheta = GreatCircle(scz_l,scz_b,src_l,src_b) \n # Azimuth is the combination of the remaining two\n cosx = GreatCircle(scx_l,scx_b,src_l,src_b)\n cosy = GreatCircle(scy_l,scy_b,src_l,src_b)\n \n # check exceptions\n # maybe not for vectorisation\n \"\"\"\n if costheta.size == 1:\n if (costheta > 1.0):\n costheta = 1.0\n if (costheta < -1.0):\n costheta = -1.0\n else:\n costheta[costheta > 1.0] = 1.0\n costheta[costheta < -1.0] = -1.0\n \"\"\"\n # theta = zenith\n theta = np.rad2deg(np.arccos(costheta))\n # phi = azimuth\n phi = np.rad2deg(np.arctan2(cosy,cosx)) # TS January 14: you sure about that? 
changed y and x\n \n # make azimuth going from 0 to 360 deg\n if phi.size == 1:\n if (phi < 0):\n phi += 360\n else:\n phi[phi < 0] += 360\n \n return theta,phi", "def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore", "def test_gell_mann_sparse_2():\n res = gen_gell_mann(205, 34, 500, True)\n\n assert res[34, 205] == -1j\n assert res[205, 34] == 1j", "def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T", "def optimisation_factory_Oklab_15() -> (\n Tuple[NDArrayFloat, Callable, Callable, Callable]\n):\n\n x_0 = as_float_array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])\n\n def objective_function(\n M: ArrayLike, RGB: ArrayLike, Jab: ArrayLike\n ) -> NDArrayFloat:\n \"\"\"*Oklab* colourspace based objective function.\"\"\"\n\n M = finaliser_function(M)\n\n XYZ_t = np.transpose(\n np.dot(\n RGB_COLOURSPACE_ACES2065_1.matrix_RGB_to_XYZ,\n np.dot(\n M,\n np.transpose(\n polynomial_expansion_Finlayson2015(RGB, 2, True)\n ),\n ),\n )\n )\n\n Jab_t = XYZ_to_optimization_colour_model(XYZ_t)\n\n return as_float(np.sum(euclidean_distance(Jab, Jab_t)))\n\n def XYZ_to_optimization_colour_model(XYZ: ArrayLike) -> NDArrayFloat:\n \"\"\"*CIE XYZ* colourspace to *Oklab* colourspace function.\"\"\"\n\n return XYZ_to_Oklab(XYZ)\n\n def finaliser_function(M: ArrayLike) -> NDArrayFloat:\n \"\"\"Finaliser function.\"\"\"\n\n return whitepoint_preserving_matrix(\n np.hstack([np.reshape(M, (3, 5)), zeros((3, 1))])\n )\n\n return (\n x_0,\n objective_function,\n XYZ_to_optimization_colour_model,\n finaliser_function,\n )", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def get_lattice_vectors(bravais: str) -> typing.Callable:\n\n bravais = bravais.lower()\n\n if bravais == 'triclinic':\n return simple_triclinic()\n\n elif bravais in ['monoclinic', 'simple_monoclinic']:\n return simple_monoclinic()\n\n 
elif bravais == 'base_centred_monoclinic':\n return base_centred_monoclinic()\n\n elif bravais in ['orthorhombic', 'simple_orthorhombic']:\n return simple_orthorhombic()\n\n elif bravais == 'body_centred_orthorhombic':\n return body_centred_orthorhombic()\n\n elif bravais == 'base_centred_orthorhombic':\n return base_centred_orthorhombic()\n\n elif bravais == 'face_centred_orthorhombic':\n return face_centred_orthorhombic()\n\n elif bravais in ['tetragonal', 'simple_tetragonal']:\n return simple_tetragonal()\n\n elif bravais == 'body_centred_tetragonal':\n return body_centred_tetragonal()\n\n elif bravais == 'rhombohedral':\n return rhombohedral()\n\n elif bravais == 'hexagonal':\n return hexagonal()\n\n elif bravais in ['face_centred_cubic', 'fcc']:\n return face_centred_cubic()\n\n elif bravais in ['body_centred_cubic', 'bcc']:\n return body_centred_cubic()\n\n elif bravais in ['simple_cubic', 'cubic']:\n return simple_cubic()\n\n else:\n error(\"Choice of bravais is erroneous: \", bravais)", "def _cz(self, g: common_gates.CZPowGate, control_axis: int, target_axis: int):", "def get_lapack_funcs(names, arrays=(), dtype=None):\n return _get_funcs(names, arrays, dtype,\n \"LAPACK\", _flapack, _clapack,\n \"flapack\", \"clapack\", _lapack_alias)", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def blas_header_text():\r\n header = \"\"\"\r\n extern \"C\"\r\n {\r\n\r\n void xerbla_(char*, void *);\r\n\r\n /***********/\r\n /* Level 1 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void srot_(const int*, float *, const int*, float *, const int*, const float *, const float *);\r\n void srotg_(float *,float *,float *,float *); \r\n void srotm_( const int*, float *, const int*, float *, const int*, const float *);\r\n void srotmg_(float *,float *,float *,const float *, float *);\r\n void sswap_( const int*, float *, const int*, float *, const int*);\r\n void scopy_( const int*, const float *, const int*, float *, const int*);\r\n void saxpy_( const int*, const float *, const float *, const int*, float *, const int*);\r\n float sdot_(const int*, const float *, const int*, const float *, const int*);\r\n void sdot_sub_(const int*, const float *, const int*, const float *, const int*, float *);\r\n void sdsdot_sub_( const int*, const float *, const float *, const int*, const float *, const int*, float *);\r\n void sscal_( const int*, const float *, float *, const int*);\r\n void snrm2_sub_( const int*, const float *, const int*, float *);\r\n void sasum_sub_( const int*, const float *, const int*, float *);\r\n void isamax_sub_( const int*, const float * , const int*, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void drot_(const int*, double *, const int*, double *, const int*, const double *, const double *);\r\n void drotg_(double *,double *,double *,double *); \r\n void drotm_( const int*, double *, const int*, double *, const int*, const double *);\r\n void drotmg_(double *,double *,double *,const double *, double *);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n void dcopy_( const int*, const double *, const int*, double *, const int*);\r\n void daxpy_( const int*, const double *, const double *, const int*, double *, const int*);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n double ddot_(const int*, const double *, const int*, const double *, const 
int*);\r\n void dsdot_sub_(const int*, const float *, const int*, const float *, const int*, double *);\r\n void ddot_sub_( const int*, const double *, const int*, const double *, const int*, double *);\r\n void dscal_( const int*, const double *, double *, const int*);\r\n void dnrm2_sub_( const int*, const double *, const int*, double *);\r\n void dasum_sub_( const int*, const double *, const int*, double *);\r\n void idamax_sub_( const int*, const double * , const int*, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void ccopy_( const int*, const void *, const int*, void *, const int*);\r\n void caxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void cdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cscal_( const int*, const void *, void *, const int*);\r\n void icamax_sub_( const int*, const void *, const int*, const int*);\r\n void csscal_( const int*, const float *, void *, const int*);\r\n void scnrm2_sub_( const int*, const void *, const int*, float *);\r\n void scasum_sub_( const int*, const void *, const int*, float *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zswap_( const int*, void *, const int*, void *, const int*);\r\n void zcopy_( const int*, const void *, const int*, void *, const int*);\r\n void zaxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void zswap_( const int*, void *, const int*, void *, const int*);\r\n void zdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdscal_( const int*, const double *, void *, const int*);\r\n void zscal_( const int*, const void *, void *, const int*);\r\n void dznrm2_sub_( const int*, const void *, const int*, double *);\r\n void dzasum_sub_( const int*, const void *, const int*, double *);\r\n void izamax_sub_( const int*, const void *, const int*, const int*);\r\n\r\n /***********/\r\n /* Level 2 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sgbmv_(char*, const int*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymv_(char*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssbmv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sspmv_(char*, const int*, const float *, const float *, const float *, const int*, const float *, float *, const int*);\r\n void strmv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbmv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void strsv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbsv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void stpmv_( char*, char*, 
char*, const int*, const float *, float *, const int*);\r\n void stpsv_( char*, char*, char*, const int*, const float *, float *, const int*);\r\n void sger_( const int*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n void ssyr_(char*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void sspr_(char*, const int*, const float *, const float *, const int*, float *); \r\n void sspr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *); \r\n void ssyr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dgbmv_(char*, const int*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymv_(char*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsbmv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dspmv_(char*, const int*, const double *, const double *, const double *, const int*, const double *, double *, const int*);\r\n void dtrmv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbmv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtrsv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbsv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtpmv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dtpsv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dger_( const int*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n void dsyr_(char*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dspr_(char*, const int*, const double *, const double *, const int*, double *); \r\n void dspr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *); \r\n void dsyr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void cgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ctrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const 
int*);\r\n void ctbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ctrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ctbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void cgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cher_(char*, const int*, const float *, const void *, const int*, void *, const int*);\r\n void cher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void chpr_(char*, const int*, const float *, const void *, const int*, void *);\r\n void chpr2_(char*, const int*, const float *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ztrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ztrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void zgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zher_(char*, const int*, const double *, const void *, const int*, void *, const int*);\r\n void zher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zhpr_(char*, const int*, const double *, const void *, const int*, void *);\r\n void zhpr2_(char*, const int*, const double *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /***********/\r\n /* Level 3 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const 
float *, const int*, const float *, float *, const int*);\r\n void ssyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void ssyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void strmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void strsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void dsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dtrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dtrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void chemm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void cherk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void csyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void cher2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ctrmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void ctrsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zhemm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsyrk_(char*, char*, const int*, const 
int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zherk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zher2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void ztrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void ztrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n }\r\n \"\"\"\r\n\r\n if detect_macos_sdot_bug():\r\n if detect_macos_sdot_bug.fix_works:\r\n header += textwrap.dedent(\"\"\"\\\r\n extern \"C\" float cblas_sdot(int, float*, int, float*, int);\r\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\r\n {\r\n return cblas_sdot(*Nx, x, *Sx, y, *Sy);\r\n }\r\n \"\"\")\r\n else:\r\n # Make sure the buggy version of sdot_ is never used\r\n header += textwrap.dedent(\"\"\"\\\r\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\r\n {\r\n fprintf(stderr,\r\n \"FATAL: The implementation of BLAS SDOT \"\r\n \"routine in your system has a bug that \"\r\n \"makes it return wrong results.\\\\n\"\r\n \"Please contact theano-dev@groups.google.com.\\\\n\"\r\n \"You can work around this bug by using a \"\r\n \"different BLAS library, or disabling BLAS\\\\n\");\r\n assert(0);\r\n }\r\n \"\"\")\r\n\r\n return header", "def generate_raw_decomposition(alphabeta, lanczos_iterations=None):\n # extract matrix elements\n alpha, beta = alphabeta\n\n # trim vectors\n if (lanczos_iterations is not None):\n (alpha, beta) = (alpha[:lanczos_iterations],beta[:lanczos_iterations-1])\n\n # generate Lanczos decomposition\n eigvals, eigvecs = linalg.eigh_tridiagonal(alpha, beta)\n raw_decomposition = [\n (eigval,eigvecs[0, i]**2)\n for i, eigval in enumerate(eigvals)\n ]\n\n return raw_decomposition", "def isYZPlanar(points=[]):\n return isCardinalPlanar(\"yz\",points)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to create a jitted arnoldi factorization. The function returns a function `_arnoldi_fact` which performs an m-step arnoldi factorization. `_arnoldi_fact` computes an m-step arnoldi factorization of an input callable `matvec`, with m = min(`it`,`num_krylov_vecs`). `_arnoldi_fact` will do at most `num_krylov_vecs` steps. `_arnoldi_fact` returns arrays `kv` and `H` which satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable: @jax.jit def modified_gram_schmidt_step_arnoldi(j, vals): """ Single step of a modified gram-schmidt orthogonalization. Args: j: Integer value denoting the vector to be orthogonalized. vals: A list of variables: `vector`: The current vector to be orthogonalized to all previous ones `krylov_vectors`: jax.array of collected krylov vectors `n`: integer denoting the column-position of the overlap <`krylov_vector`|`vector`> within `H`. Returns: updated vals. """ vector, krylov_vectors, n, H = vals v = krylov_vectors[j, :] h = jax.numpy.vdot(v, vector) H = jax.ops.index_update(H, jax.ops.index[j, n], h) vector = vector - h * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors, n, H] @functools.partial(jax.jit, static_argnums=(5, 6, 7)) def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): """ Compute an m-step arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and `Vm = jax.numpy.transpose(kv[:it, :])`, `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1) and `em` a cartesian basis vector of shape `(1, kv.shape[1])` with `em[0, -1] == 1` and 0 elsewhere. Note that the caller is responsible for dtype consistency between the inputs, i.e. dtypes between all input arrays have to match. Args: matvec: The matrix vector product. args: List of arguments to `matvec`. v0: Initial state to `matvec`. krylov_vectors: An array for storing the krylov vectors. The individual vectors are stored as columns. The shape of `krylov_vecs` has to be (num_krylov_vecs + 1, np.ravel(v0).shape[0]). H: Matrix of overlaps. The shape has to be (num_krylov_vecs + 1,num_krylov_vecs + 1). start: Integer denoting the start position where the first produced krylov_vector should be inserted into `krylov_vectors` num_krylov_vecs: Number of krylov iterations, should be identical to `krylov_vectors.shape[0] + 1` eps: Convergence parameter. Iteration is terminated if the norm of a krylov-vector falls below `eps`. Returns: kv: An array of krylov vectors H: A matrix of overlaps it: The number of performed iterations. 
""" Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps return _arnoldi_fact
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, 
fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, 
initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def run_fact(train_data, val_data, k):\n size = len(train_data['user_id'])\n train_data_bootstrap = bootstrap_data(train_data, size)\n\n mat, accuracy_train, accuracy_valids, error_train, error_valids = \\\n als(train_data_bootstrap, val_data, k=k, lr=0.01, num_iteration=500000)\n\n return mat", "def svm_admm(X, y, mylambda=1., rho=1., rel_par=1., QUIET = False, MAX_ITER = 200, ABSTOL = 1e-6, RELTOL = 1e-2):\n if not QUIET:\n tic = time.time()\n m, n = X.shape \n y_raveld = y.ravel() \n # A is a matrix given by [-y_j*x_j -y_j]\n A = - np.dot(np.diag(y_raveld), np.concatenate((X, np.ones((m, 1))), axis = 1))\n\n #Data preprocessing\n m, n = A.shape\n \n #ADMM solver\n x = np.zeros((n, N))\n z = np.zeros((n, N))\n u = np.zeros((n, N))\n\n if not QUIET:\n print('\\n%3s\\t%10s\\t%10s\\t%10s\\t%10s\\t%10s' %('iter',\n 'r np.linalg.norm', \n 'eps pri', \n 's np.linalg.norm', \n 'eps dual', \n 'objective'))\n\n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n\n for k in range(MAX_ITER):\n # x-update \n for i in range(N):\n A_temp = A[i * num_per_batch: (i + 1) * num_per_batch, :]\n y_temp = y[i * num_per_batch: (i + 1) * num_per_batch, :]\n #\n # temp1 = -z[:, i] + u[:, i]\n # fun = lambda x: np.sum(np.maximum(np.dot(A_temp, x.reshape((n, 1))) + 1, np.zeros((num_per_batch, 1)))) + \\\n # rho/2. * np.dot(x + temp1, x + temp1)\n # # np.random.uniform(-1, 1, (n,1))\n # result = scipy.optimize.minimize(fun, 0.1 * np.ones((n, 1)), tol = 1e-8, method = 'Nelder-Mead')\n # x_temp = result.x\n #\n x_var = Variable(n)\n constraints = []\n objective = Minimize(sum_entries(pos( A_temp * x_var + 1)) + rho/2. * sum_squares((x_var - z[:, i] + u[:, i])))\n prob = Problem(objective, constraints)\n result = prob.solve()\n x_temp = x_var.value\n\n x_temp = x_temp.reshape((x_temp.shape[0], 1))\n x[:, i] = x_temp.ravel()\n\n xave = np.mean(x, axis = 1)\n\n # z-update\n zold = np.copy(z)\n x_hat = rel_par * x + (1. 
- rel_par) * zold\n z = N * rho/(1./mylambda + N * rho) * np.mean(x_hat + u, axis = 1)\n z = z.reshape((z.shape[0], 1))\n z = np.dot(z, np.ones((1, N))) # N columns of the same values\n\n # u-update\n u = u + x_hat - z\n\n # diagnostics, reporting, termination checks\n h['objval'][k] = myobjective(A, mylambda, x, z)\n h['r_norm'][k] = np.linalg.norm(x - z)\n h['s_norm'][k] = np.linalg.norm(rho * (z - zold))\n h['eps_pri'][k] = np.sqrt(n) * ABSTOL+ RELTOL * np.maximum(np.linalg.norm(x), np.linalg.norm(-z))\n h['eps_dual'][k] = np.sqrt(n) * ABSTOL + RELTOL * np.linalg.norm(rho * u)\n if not QUIET:\n print('%4d\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.2f' %(k + 1,\\\n h['r_norm'][k],\\\n h['eps_pri'][k],\\\n h['s_norm'][k],\\\n h['eps_dual'][k],\\\n h['objval'][k]))\n\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n\n if not QUIET:\n toc = time.time()-tic\n print(\"\\nElapsed time is %.2f seconds\"%toc)\n\n return z, h", "def inertia_matrix(dyadic, rf):\n return Matrix([[dot(dot(dyadic, i), j) for j in rf] for i in rf])", "def test_matrix_factorization(spark_ctx):\n\n #\n # Basic context setting-up.\n #\n\n dr = Drudge(spark_ctx)\n\n n = Symbol('n')\n r = Range('R', 0, n)\n\n dumms = symbols('a b c d e f g')\n a, b, c, d = dumms[:4]\n dr.set_dumms(r, dumms)\n dr.add_resolver_for_dumms()\n\n # The indexed bases.\n x = IndexedBase('X')\n y = IndexedBase('Y')\n u = IndexedBase('U')\n v = IndexedBase('V')\n t = IndexedBase('T')\n\n #\n # Test case 1.\n #\n # The final expression to optimize is mathematically\n #\n # .. math::\n #\n # (2 X - Y) * (2 U + V)\n #\n # Here, the expression is to be given in its extended form originally, and\n # we test if it can be factorized into something similar to what we have\n # above. Here we have the signs and coefficients to have better code\n # coverage for these cases. This test case more concentrates on the\n # horizontal complexity in the input.\n #\n\n # The target.\n target = dr.define_einst(\n t[a, b],\n 4 * x[a, c] * u[c, b] + 2 * x[a, c] * v[c, b]\n - 2 * y[a, c] * u[c, b] - y[a, c] * v[c, b]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=False)\n\n # Test the cost.\n cost = get_flop_cost(res)\n leading_cost = get_flop_cost(res, leading=True)\n assert cost == 2 * n ** 3 + 2 * n ** 2\n assert leading_cost == 2 * n ** 3\n cost = get_flop_cost(res, ignore_consts=False)\n assert cost == 2 * n ** 3 + 4 * n ** 2\n\n #\n # Test case 2.\n #\n # The final expression to optimize is mathematically\n #\n # .. math::\n #\n # (X - 2 Y) * U * V\n #\n # Different from the first test case, here we concentrate more on the\n # treatment of depth complexity in the input. 
The sum intermediate needs to\n # be factored again.\n #\n\n # The target.\n target = dr.define_einst(\n t[a, b], x[a, c] * u[c, d] * v[d, b] - 2 * y[a, c] * u[c, d] * v[d, b]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=True)\n\n # Test the cost.\n cost = get_flop_cost(res)\n leading_cost = get_flop_cost(res, leading=True)\n assert cost == 4 * n ** 3 + n ** 2\n assert leading_cost == 4 * n ** 3\n cost = get_flop_cost(res, ignore_consts=False)\n assert cost == 4 * n ** 3 + 2 * n ** 2\n\n # Test disabling summation optimization.\n res = optimize(targets, strategy=Strategy.BEST)\n assert verify_eval_seq(res, targets, simplify=True)\n new_cost = get_flop_cost(res, ignore_consts=False)\n assert new_cost - cost != 0", "def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv", "def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):\n if not np.iscomplex(dt):\n dt = dt.real\n\n # normalize starting vector\n vstart = xp.asarray(vstart)\n nrmv = float(xp.linalg.norm(vstart))\n assert nrmv > 0\n vstart = vstart / nrmv\n\n alpha = np.zeros(block_size)\n beta = np.zeros(block_size - 1)\n\n V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)\n V[0] = vstart\n res = None\n\n\n for j in range(len(vstart)):\n \n w = Afunc(V[j])\n alpha[j] = xp.vdot(w, V[j]).real\n\n if j == len(vstart)-1:\n #logger.debug(\"the krylov subspace is equal to the full space\")\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n \n if len(V) == j+1:\n V, old_V = xp.empty((len(V) + block_size, len(vstart)), dtype=vstart.dtype), V\n V[:len(old_V)] = old_V\n del old_V\n alpha = np.concatenate([alpha, np.zeros(block_size)])\n beta = np.concatenate([beta, np.zeros(block_size)])\n\n w -= alpha[j]*V[j] + (beta[j-1]*V[j-1] if j > 0 else 0)\n beta[j] = xp.linalg.norm(w)\n if beta[j] < 100*len(vstart)*np.finfo(float).eps:\n # logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n\n if 3 < j and j % 2 == 0:\n new_res = _expm_krylov(alpha[:j+1], beta[:j], V[:j+1].T, nrmv, dt)\n if res is not None and xp.allclose(res, new_res):\n return new_res, j+1\n else:\n res = new_res\n V[j + 1] = w / beta[j]", "def l2_ardnmf(V, beta, tol, max_iter, W, H, a, b, verbose, random_state):\n F, N = V.shape\n K = W.shape[1]\n\n # To prevent from possible numerical instabilities, the data is added a\n # small value (Numpy's eps) and fitted to WH+eps instead of WH. 
You may\n # set eps = 0 to avoid this but divisions by zero may occur.\n V = V + EPS\n V_ap = W.dot(H) + EPS\n\n cst = (F+N)/2+a+1\n bound = b/cst\n\n scale_W = 0.5 * np.sum(W**2, axis=0).T\n scale_H = 0.5 * np.sum(H**2, axis=1)\n inv_lambda = cst/(scale_W+scale_H+b)\n\n fit = np.array([0] * (max_iter+1), dtype=np.float64)\n obj = np.array([0] * (max_iter+1), dtype=np.float64)\n lambdas = np.zeros((K, max_iter), dtype=np.float64)\n\n itera = 0\n rel = np.inf\n lambdas[:, itera] = 1./inv_lambda\n fit[itera] = betadiv(V,V_ap,beta)\n obj[itera] = fit[itera] + cst * np.sum(np.log(scale_W+scale_H+b))\n\n logger.setLevel(verbose)\n logger.info('iter = %4i | obj = %+5.2E | rel = %4.2E (target is %4.2E) \\n',itera, obj[itera], rel, tol)\n while rel > tol and itera < max_iter:\n itera = itera + 1\n\n ## Update H ##\n R = H * np.tile(inv_lambda[:, None], (1,N))\n\n if beta > 2:\n P = W.T.dot(V*V_ap ** (beta-2.))\n Q = W.T.dot(V_ap ** (beta-1.)) + R\n ex = 1./(beta-1.)\n elif beta == 2:\n P = W.T.dot(V)\n Q = (W.T.dot(W)).dot(H) + R + np.tile(EPS*np.sum(W,0).T, (1,N))\n ex = 1.\n elif (beta < 2) and (beta != 1):\n P = W.T.dot(V*V_ap ** (beta-2.))\n Q = W.T.dot(V_ap ** (beta-1.)) + R\n ex = 1./(3.-beta)\n elif beta == 1:\n P = W.T.dot(V/V_ap)\n Q = np.tile(np.sum(W,0).T[:, None], (1,N)) + R\n ex = 1./2\n\n ind = H>0;\n H[ind] = H[ind] * (P[ind]/Q[ind]) ** ex\n scale_H = 0.5 * np.sum(H ** 2,1);\n\n V_ap = W.dot(H) + EPS\n\n ## Update W ##\n R = W * np.tile(inv_lambda.T, (F,1))\n\n if beta > 2:\n P = (V*V_ap ** (beta-2.)).dot(H.T)\n Q = (V_ap ** (beta-1.)).dot(H.T) + R\n ex = 1./(beta-1)\n elif beta == 2:\n P = V.dot(H.T)\n Q = W.dot((H.dot(H.T))) + R + np.tile(EPS*np.sum(H,axis=1).T, (F,1))\n ex = 1.\n elif (beta < 2) and (beta != 1):\n P = (V*V_ap ** (beta-2.)).dot(H.T)\n Q = (V_ap ** (beta-1.)).dot(H.T) + R\n ex = 1./(3.-beta)\n elif beta == 1:\n P = (V/V_ap).dot(H.T)\n Q = np.tile(np.sum(H,axis=1).T, (F,1)) + R\n ex = 1./2\n\n ind = W>0\n W[ind] = W[ind] * (P[ind]/Q[ind]) ** ex\n scale_W = 0.5 * np.sum(W**2, axis=0).T\n\n V_ap = W.dot(H) + EPS\n\n ## Update lambda ##\n inv_lambda = cst/(scale_W+scale_H+b)\n\n ## Monitor ##\n fit[itera] = betadiv(V, V_ap, beta)\n obj[itera] = fit[itera] + cst*np.sum(np.log(scale_W+scale_H+b))\n lambdas[:,itera] = 1./inv_lambda\n\n # Compute relative change of the relevance parameters\n rel = np.max(np.abs((lambdas[:, itera]-lambdas[:, itera-1])/lambdas[:,itera]))\n\n # Display objective value and relative change every 500 iterations\n if itera % 500 == 0:\n logger.info('iter = %4i | obj = %+5.2E | rel = %4.2E (target is %4.2E) \\n',itera,obj[itera],rel,tol)\n\n # Trim variables\n fit = fit[:itera+1]\n obj = obj[:itera+1]\n lambdas = lambdas[:, :itera+1]\n\n # Add constant to obtain true minus log posterior value\n obj = obj + (K*cst*(1.-np.log(cst)))\n\n # Display final values\n logger.info('iter = %4i | obj = %+5.2E | rel = %4.2E (target is %4.2E) \\n',itera, obj[itera], rel, tol)\n if itera == max_iter:\n logger.info('Maximum number of iterations reached (n_iter_max = %d) \\n',n_iter_max)\n\n return W, H, lambdas, obj, fit, bound", "def admm_linearized_krylov(alpha, g, L, A, b, niter_arn, sigma, niter,\n **kwargs):\n h, q = arnoldi(A, b, x0, niter_arn)\n beta = (A(x0) - b).norm()\n\n Qn = KrylovSpaceEmbedding(q[:-1])\n Qnp1 = KrylovSpaceEmbedding(q)\n H = odl.MatrixOperator(h)\n assert alpha in H.domain\n assert Qnp1.domain == H.range\n assert L.domain == Qnp1.range\n\n g_transl = g.translated(-L(x0))\n\n U = L * Qn\n S = odl.BroadcastOperator(H, U)\n\n f = 
odl.solvers.ZeroFunctional(alpha.space)\n e1 = H.range.zero()\n e1[0] = 1\n data_fit = odl.solvers.L2NormSquared(H.range).translated(beta * e1)\n G = odl.solvers.SeparableSum(data_fit, g_transl)\n\n opnorm_H = odl.power_method_opnorm(H, maxiter=50)\n tau = 0.5 * sigma / opnorm_H ** 2\n odl.solvers.admm_linearized(alpha, f, G, S, tau, sigma, niter,\n **kwargs)", "def factor_mat(all_dat, f_num, iterations, regularization):\n\n\t# get # of users and # of items\n\t[u_num, i_num] = all_dat.shape\n\n\t# init user factors and item factors with random values\n\tu_fac = np.matrix(np.random.rand(u_num, f_num))\t# MxF\n\ti_fac = np.matrix(np.random.rand(i_num, f_num))\t# NxF\n\n\t# calculate the preference matrix\n\tpreference = cal_preference(all_dat)\n\n\t# calculate the confidence matrix\n\tconfidence = cal_confidence(all_dat)\n\t\n\t# recalculate the user factors and item factors using the alternating least square method\n\tfor itr in range(iterations):\n\t\tu_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization)\n\t\t#print itr, \"u_fac\"\n\t\ti_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization)\n\t\t#print itr, \"i_fac\"\n\t\n\t# save the output\n\tdf = pd.DataFrame(u_fac)\n\tdf.to_csv(\"tmp/u_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\tdf = pd.DataFrame(i_fac.T)\n\tdf.to_csv(\"tmp/i_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\n\t# an MxF user factor matrix and an FxN item factor matrix\n\treturn [u_fac, i_fac.T]", "def mylinearsvm(beta, lambd, x, y, step_size_init, eps=0.0000001, max_iter=100):\n theta = beta\n t = step_size_init\n grad_beta = grad(beta, lambd, x, y)\n beta_vals = [beta]\n objs = [obj(beta, lambd, x, y)]\n iter = 0\n while np.linalg.norm(grad_beta) > eps and iter < max_iter: \n # THE CODE BELOW SO IT USES BACKTRACKING LINE SEARCH INSTEAD OF A CONSTANT STEP SIZE\n t = backtracking(beta, lambd=lambd, x=x, y=y, step_size=t)\n # THE CODE BELOW USES UPDATING THETA FOR BETA OPTIMAZATION\n beta = theta - t*grad_beta\n theta = beta + (iter/(iter+3))*(beta - beta_vals[-1])\n obj_val = obj(beta,lambd, x, y)\n beta_vals.append(beta)\n objs.append(obj_val)\n grad_beta = grad(theta, lambd, x, y)\n iter += 1\n \n return np.array(beta_vals), np.array(objs)", "def _matFillinc(a, n, zarray, lam, mu, rho, grad_rho, g, \n beta_i, gamma, z_i, l, li, Q): \n for i in range(len(zarray)):\n \n # r dh/dr\n a[i,0,0] = -2\n a[i,0,1] = (n+1)\n \n # r dL/dr\n a[i,1,0] = -n\n a[i,1,1] = 1.\n a[i,1,2] = 0.\n a[i,1,3] = zarray[i]/mu[i]*l\n \n # r df_L/dr\n if Q == 1:\n a[i,2,0] = (12*mu[i]*z_i[i] - 4*rho[i]*g[i] \n + (rho[i]**2)*zarray[i])*li\n else:\n a[i,2,0] = (12*mu[i]*z_i[i] - 4*rho[i]*g[i])*li\n a[i,2,1] = -(6*mu[i]*z_i[i] - rho[i]*g[i])*(n+1)*li\n a[i,2,2] = 0.\n a[i,2,3] = (n+1.)\n if Q == 2:\n a[i,2,4] = -rho[i]*(n+1)*li\n a[i,2,5] = zarray[i]*rho[i]\n \n # r dF_M/dr\n a[i,3,0] = (rho[i]*g[i]-6*mu[i]*z_i[i])*n*li\n a[i,3,1] = 2*mu[i]*z_i[i]*(2*n*(n+1) - 1)*li\n a[i,3,2] = -n\n a[i,3,3] = -3\n a[i,3,4] = rho[i]*n*li\n \n # r dk_d/dr\n if Q == 2:\n a[i,4,0] = -rho[i]*zarray[i]\n a[i,4,1] = 0.\n a[i,4,2] = 0.\n a[i,4,3] = 0.\n if Q == 2:\n a[i,4,4] = -(n+1)\n a[i,4,5] = zarray[i]*l\n \n # r dq/dr\n if Q == 1:\n a[i,5,0] = -(grad_rho[i]*zarray[i])*li\n a[i,5,1] = 0.\n a[i,5,2] = 0.\n a[i,5,3] = 0.\n a[i,5,4] = z_i[i]*n*(n+1.)*li\n a[i,5,5] = -2\n else:\n a[i,5,0] = -rho[i]*(n+1)*li\n a[i,5,1] = rho[i]*(n+1)*li\n a[i,5,2] = 0\n a[i,5,3] = 0.\n a[i,5,4] = 0\n a[i,5,5] = n-1.", "def _fd_matrix(step_ratio, 
parity, nterms):\n _assert(0 <= parity <= 6,\n 'Parity must be 0, 1, 2, 3, 4, 5 or 6! ({0:d})'.format(parity))\n step = [1, 2, 2, 4, 4, 4, 4][parity]\n inv_sr = 1.0 / step_ratio\n offset = [1, 1, 2, 2, 4, 1, 3][parity]\n c0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]\n c = c0 / \\\n special.factorial(np.arange(offset, step * nterms + offset, step))\n [i, j] = np.ogrid[0:nterms, 0:nterms]\n return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))", "def vectorize_scheme(self, optree, arg_list, vectorLen):\n table_set = extract_tables(optree)\n init_local_mapping = {table:table for table in table_set if table.const}\n\n # defaulting sub_vector_size to vector_size when undefined\n vectorized_path = self.extract_vectorizable_path(optree, fallback_policy, local_mapping=init_local_mapping)\n linearized_most_likely_path = vectorized_path.linearized_optree\n validity_list = vectorized_path.validity_mask_list\n\n # replacing temporary variables by their latest assigned values\n linearized_most_likely_path = instanciate_variable(linearized_most_likely_path, vectorized_path.variable_mapping)\n\n vector_paths = []\n vector_masks = []\n vector_arg_list = []\n\n def vlaVectorizeType(eltType):\n \"\"\" return the corresponding VLA format for a given element type \"\"\"\n # select group multiplier = 1 to for now\n return VLA_FORMAT_MAP[(eltType, self.defaultGroupSize)]\n\n # dictionnary of arg_node (scalar) -> new variable (vector) mapping\n vec_arg_dict = dict(\n (arg_node, Variable(\"vec_%s\" % arg_node.get_tag(), precision=vlaVectorizeType(arg_node.get_precision()), var_type=Variable.Local)) for arg_node in arg_list)\n constant_dict = {}\n\n arg_list_copy = dict((arg_node, vec_arg_dict[arg_node]) for arg_node in arg_list)\n # vector argument variables are already vectorized and should\n # be used directly in vectorized scheme\n vectorization_map = dict((vec_arg_dict[arg_node], vec_arg_dict[arg_node]) for arg_node in arg_list)\n\n # adding const table in pre-copied map to avoid replication\n arg_list_copy.update({table:table for table in table_set if table.const})\n sub_vector_path = linearized_most_likely_path.copy(arg_list_copy)\n\n vector_path = self.vector_replicate_scheme_in_place(sub_vector_path, vectorLen, vectorization_map)\n\n # no validity condition for vectorization (always valid)\n if len(validity_list) == 0:\n Log.report(Log.Warning, \"empty validity list encountered during vectorization\")\n vector_mask = Constant(True, precision = ML_Bool)\n else:\n vector_mask = and_merge_conditions(validity_list).copy(arg_list_copy)\n\n # todo/fixme: implement proper length agnostic masking\n vector_mask = self.vector_replicate_scheme_in_place(vector_mask, vectorLen, vectorization_map)\n\n # for the first iteration (first sub-vector), we extract\n constant_dict = extract_const(arg_list_copy)\n vec_arg_list = [vec_arg_dict[arg_node] for arg_node in arg_list]\n return vec_arg_list, vector_path, vector_mask", "def __init__(self, lmin=1, lmax=25, lstep=0.2,\n N=32, M=64, K=64, smin=1e-3):\n t1 = 1.5*np.pi*(np.arange(M) + 0.5)/M\n r1 = 1/np.fmax(np.abs(np.sin(t1)), np.abs(np.cos(t1)))\n t2 = 1.5*np.pi*np.random.rand(K)\n r2 = 1/np.fmax(np.abs(np.sin(t2)), np.abs(np.cos(t2)))\n r2 *= np.random.rand(K)\n t = np.hstack((t1,t2))[:,np.newaxis]\n r = np.hstack((r1,r2))[:,np.newaxis]\n\n k = 2/3*(np.arange(N) + 1)\n\n def ldfun(lm):\n A = basis_func(lm, k, r, t)\n\n Q,R = np.linalg.qr(A)\n _,S,V = np.linalg.svd(Q[:M])\n\n self.R = R\n self.S = S\n self.V = V\n return S[-1]\n \n ldfunv = 
np.vectorize(ldfun)\n L = int((lmax-lmin)/lstep)\n lm = np.linspace(lmin, lmax, L+1)\n S = ldfunv(lm)\n\n J = np.arange(1,L)\n J = J[(S[J] < S[J-1]) & (S[J] < S[J+1])]\n self.eigen_val = []\n self.multiplicity = []\n self.coeff = []\n\n for j in J:\n res = minimize_scalar(ldfun, (lm[j-1], lm[j], lm[j+1]))\n m = np.count_nonzero(self.S < smin)\n if m==0: continue\n c = np.linalg.solve(self.R, self.V[-m:].T)\n\n self.eigen_val.append(res.x)\n self.multiplicity.append(m)\n self.coeff.append(c.T)\n\n self.n_bases = N\n self.lm = lm\n self.svmin = S", "def nonnegative_tensor_factorization(X, r, method='anls_bpp',\n tol=1e-4, stop_criterion=1,\n min_iter=20, max_iter=200, max_time=1e6,\n init=None, orderWays=None):\n\n nWay = len(X.shape)\n\n if orderWays is None:\n orderWays = np.arange(nWay)\n\n # set initial values\n if init is not None:\n F_cell = init\n else:\n Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]\n F_cell = Finit\n\n grad = getGradient(X, F_cell, nWay, r)\n\n nr_X = X.norm()\n nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2\n for i in range(nWay)))\n\n if method == \"anls_bpp\":\n method = anls_bpp()\n elif method == \"anls_asgroup\":\n method = anls_asgroup()\n else:\n raise Exception(\"Unknown method\")\n\n # Execute initializer\n F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)\n\n tStart = time.time()\n\n if stop_criterion == 2:\n F_kten = ktensor(F_cell)\n rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n\n # main iterations\n for iteration in range(max_iter):\n cntu = True\n\n F_cell, FF_init = method.iterSolver(X, F_cell,\n FF_init, nWay, r, orderWays)\n F_kten = ktensor(F_cell)\n\n if iteration >= min_iter:\n\n if time.time() - tStart > max_time:\n cntu = False\n\n else:\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n if SC_PGRAD < tol:\n cntu = False\n\n elif stop_criterion == 2:\n prev_rel_Error = rel_Error\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n SC_DIFF = np.abs(prev_rel_Error - rel_Error)\n if SC_DIFF < tol:\n cntu = False\n else:\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n if rel_Error < 1:\n cntu = False\n\n if not cntu:\n break\n\n return F_kten", "def FFT_vec(x):\n x = np.asarray(x,dtype=complex)\n N = x.shape[0]\n \n if np.log2(N) % 1 > 0:\n raise ValueError(\"size of x must be a power of 2\")\n\n Nm1d2 = int(np.floor((N-1)/2))\n if N & 1: # odd\n Ahat = np.zeros(1+Nm1d2)\n else: # even\n Ahat = np.zeros(1+Nm1d2+1)\n \n # N_min here is equivalent to the stopping condition above,\n # and should be a power of 2\n N_min = min(N, 32)\n \n # Perform an O[N^2] DFT on all length-N_min sub-problems at once\n n = np.arange(N_min)\n k = n[:, None]\n M = np.exp(-2*cmath.sqrt(-1) * cmath.pi * n * k / N_min)\n X = np.dot(M, x.reshape((N_min, -1)))\n Y = X.ravel()\n # build-up each level of the recursive calculation all at once\n while X.shape[0] < N:\n X_even = X[:, :int(X.shape[1]/ 2)]\n X_odd = X[:,int( X.shape[1] / 2):]\n factor = np.exp(-1*cmath.sqrt(-1) * cmath.pi * np.arange(X.shape[0])\n / X.shape[0])[:, None]\n X = np.vstack([X_even + factor * X_odd,\n X_even - factor * X_odd])\n Y = X.ravel()/N\n \n\n Ahat[-1] = np.real(Y[int(N/2)])\n Ahat[0] = np.real(Y[0])\n Ahat[1:(Nm1d2+1)] = +2*np.real(Y[1:(Nm1d2+1)])\n\n Bhat = -2*np.imag(Y[1:(Nm1d2+1)])\n\n##############################\n N= 
2**9\n t = np.arange(N)/N\n a = 0\n b = 1\n \n\n ft = evalT(Ahat, Bhat, a, b, t)\n fig = plt.plot(t, ft, label='n = ' + str(x.size))\n plt.legend(['FFTvec'])\n \n ##############################\n\n return X.ravel(), Ahat, Bhat, fig, ft" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
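The query/document records above and below state the Arnoldi recurrence `matrix @ Vm - Vm @ Hm - fm * em = 0` but, being dataset rows, never show it end to end. The following is a minimal, self-contained NumPy sketch of the general m-step Arnoldi technique with modified Gram-Schmidt orthogonalization; it is not the jitted JAX implementation stored in the document fields, and every name in it (`arnoldi_sketch`, the random test matrix, the breakdown tolerance) is illustrative rather than taken from this dataset.

```python
import numpy as np

def arnoldi_sketch(A, v0, m):
    """m-step Arnoldi factorization of a dense matrix A (illustrative only)."""
    n = v0.size
    V = np.zeros((n, m + 1))          # Krylov vectors stored as columns
    H = np.zeros((m + 1, m))          # (m+1) x m upper-Hessenberg overlap matrix
    V[:, 0] = v0 / np.linalg.norm(v0)
    for j in range(m):
        w = A @ V[:, j]
        for i in range(j + 1):        # modified Gram-Schmidt orthogonalization
            H[i, j] = np.vdot(V[:, i], w)
            w = w - H[i, j] * V[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:       # invariant subspace found; stop early
            return V[:, :j + 1], H[:j + 1, :j + 1]
        V[:, j + 1] = w / H[j + 1, j]
    return V, H

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 50))
m = 10
V, H = arnoldi_sketch(A, rng.standard_normal(50), m)

Vm, Hm = V[:, :m], H[:m, :m]
fm = V[:, m] * H[m, m - 1]            # residual vector f_m
em = np.zeros(m)
em[-1] = 1.0                          # last Cartesian basis vector e_m
# Arnoldi recurrence: A @ Vm - Vm @ Hm - outer(fm, em) == 0 up to round-off
assert np.allclose(A @ Vm, Vm @ Hm + np.outer(fm, em))
print("recurrence residual:", np.linalg.norm(A @ Vm - Vm @ Hm - np.outer(fm, em)))
```

Storing the Krylov vectors as rows, as `krylov_vectors` does in the JAX code (hence `Vm = jax.numpy.transpose(kv[:it, :])`), is simply the transpose of this layout; the recurrence itself is unchanged.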
Compute an m-step arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps
[ "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):\n if not np.iscomplex(dt):\n dt = dt.real\n\n # normalize starting vector\n vstart = xp.asarray(vstart)\n nrmv = float(xp.linalg.norm(vstart))\n assert nrmv > 0\n vstart = vstart / nrmv\n\n alpha = np.zeros(block_size)\n beta = np.zeros(block_size - 1)\n\n V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)\n V[0] = vstart\n res = None\n\n\n for j in range(len(vstart)):\n \n w = Afunc(V[j])\n alpha[j] = xp.vdot(w, V[j]).real\n\n if j == len(vstart)-1:\n #logger.debug(\"the krylov subspace is equal to the full space\")\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n \n if len(V) == j+1:\n V, old_V = xp.empty((len(V) + block_size, len(vstart)), dtype=vstart.dtype), V\n V[:len(old_V)] = old_V\n del old_V\n alpha = np.concatenate([alpha, np.zeros(block_size)])\n beta = np.concatenate([beta, np.zeros(block_size)])\n\n w -= alpha[j]*V[j] + (beta[j-1]*V[j-1] if j > 0 else 0)\n beta[j] = xp.linalg.norm(w)\n if beta[j] < 100*len(vstart)*np.finfo(float).eps:\n # logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n\n if 3 < j and j % 2 == 0:\n 
new_res = _expm_krylov(alpha[:j+1], beta[:j], V[:j+1].T, nrmv, dt)\n if res is not None and xp.allclose(res, new_res):\n return new_res, j+1\n else:\n res = new_res\n V[j + 1] = w / beta[j]", "def get_Lk(m, lvec):\n\n raise NotImplementedError", "def times_vector(mat, vec):\n rows, cols = mat.shape\n num_blocks = num_blocks_from_total_blocks(cols // rows)\n multiplied = []\n for i in range(num_blocks):\n mat_block = mat[\n Ellipsis, rows * ((i + 1) * i) // 2 : rows * ((i + 1) * (i + 2)) // 2\n ]\n vec_block = vec[Ellipsis, rows * i : rows * (i + 1)]\n multiplied.append(jnp.einsum(\"...ij,...i->ij\", mat_block, vec_block))\n return jnp.concatenate(multiplied, axis=-1)", "def vecMatProduct(mat,vec):\n if isinstance(mat, np.ndarray):\n vecR = np.resize(vec,(len(vec),1))\n return matrixProduct(mat,vecR)[:,0]\n elif isinstance(mat, sp.Sparse):\n V = [0]*len(vec)\n for pos in mat.matrixDict:\n V[pos[0]] += mat.matrixDict[pos]*vec[pos[1]]\n return np.array(V)\n else:\n raise TypeError(\"Incorrect type for matrix/vector to format together: \\\n numpy array or custom sparse matrix please\")", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def KE(m, v):\n return 0.5 * np.sum(np.dot(m, v * v))", "def krylovStep(self):\n m = self.m\n \n # update Hm and Vm with info from last step\n self.Hm[m,m-1] = self.h_m1\n self.v_m1.shape = (self.v_m1.shape[0],1)\n self.Vm[:,m] = self.v_m1.copy().T #v_m1 is normalized\n \n # compute new step\n self.v_m1 = self.applyA(self.Vm[:,m])\n # Gram-Schmidt orthogonalization\n # TODO: replace with Householder ortho. 
-> is more stable\n for j in xrange(m+1):\n self.Hm[j, m] = np.dot(self.Vm[:,j], self.v_m1)\n self.v_m1 -= self.Hm[ j, m] * self.Vm[:, j]\n self.h_m1 = norm(self.v_m1)\n m = m+1; self.m = m\n \n # test for break-down:\n if norm(self.h_m1) < np.finfo(np.double).eps:\n self.isExact = True\n return\n \n self.v_m1 = self.v_m1 / self.h_m1\n return", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def preprocessing_vsm(self, k, m):\n self.inv_lists_sorted = OrderedDict(sorted(self.inverted_lists.items(),\n key=lambda x: len(x[1]),\n reverse=True)[:m])\n N = self.num_docs\n AVDL = sum(self.record_lengths.values()) / float(N)\n\n term_id = 0\n nz_vals, row_inds, col_inds = [], [], []\n\n for term, inv_list in self.inv_lists_sorted.items():\n for doc_id, tf in inv_list.items():\n df = len(self.inv_lists_sorted[term])\n DL = self.record_lengths[doc_id]\n self.inv_lists_sorted[term][doc_id] = \\\n self.bm25_score(tf, df, N, AVDL, DL)\n nz_vals += [v for v in self.inv_lists_sorted[term].values()\n if v != 0]\n row_inds += [term_id] * len(self.inv_lists_sorted[term])\n col_inds += [id - 1 for id, v in\n self.inv_lists_sorted[term].items() if v != 0]\n term_id += 1\n self.A = scipy.sparse.csr_matrix((nz_vals, (row_inds, col_inds)),\n dtype=float)\n # LSI\n self.Uk, Sk, self.Vk = scipy.sparse.linalg.svds(self.A, k)\n self.Sk = np.diag(Sk)", "def _lanczos_m_upd(A, m, matrix_shape, nv=1, rademacher=False, SV=None):\n\n orthtol = 1e-16\n\n if type(SV) != np.ndarray:\n if rademacher:\n # SV = np.sign(np.random.randn(A.shape[0], nv))\n SV = np.sign(np.random.randn(matrix_shape[0], nv))\n else:\n # SV = np.random.randn(A.shape[0], nv) # init random vectors in columns: n x nv\n SV = np.random.randn(matrix_shape[0], nv)\n\n V = np.zeros((SV.shape[0], m, nv))\n T = np.zeros((nv, m, m))\n\n np.divide(SV, np.linalg.norm(SV, axis=0), out=SV) # normalize each column\n V[:, 0, :] = SV\n\n w = A.matvec(SV.squeeze())\n w = w.reshape(-1, 1)\n alpha = np.einsum('ij,ij->j', w, SV)\n w -= alpha[None, :] * SV\n beta = np.einsum('ij,ij->j', w, w)\n np.sqrt(beta, beta)\n\n T[:, 0, 0] = alpha\n T[:, 0, 1] = beta\n T[:, 1, 0] = beta\n\n np.divide(w, beta[None, :], out=w)\n V[:, 1, :] = w\n t = np.zeros((m, nv))\n\n for i in range(1, m):\n SVold = V[:, i - 1, :]\n SV = V[:, i, :]\n\n w = A.dot(SV.squeeze()) # sparse @ dense\n w = w.reshape(-1, 1)\n w -= beta[None, :] * SVold # n x nv\n np.einsum('ij,ij->j', w, SV, out=alpha)\n\n T[:, i, i] = alpha\n\n if i < m - 1:\n w -= alpha[None, :] * SV # n x nv\n # reortho\n np.einsum('ijk,ik->jk', V, w, out=t)\n w -= np.einsum('ijk,jk->ik', V, t)\n np.einsum('ij,ij->j', w, w, out=beta)\n np.sqrt(beta, beta)\n np.divide(w, beta[None, :], out=w)\n\n T[:, i, i + 1] = beta\n T[:, i + 1, i] = beta\n\n # more reotho\n innerprod = np.einsum('ijk,ik->jk', V, w)\n reortho = False\n for _ in range(100):\n if (innerprod <= orthtol).sum():\n reortho = True\n break\n np.einsum('ijk,ik->jk', V, w, out=t)\n w -= np.einsum('ijk,jk->ik', V, t)\n np.divide(w, np.linalg.norm(w, axis=0)[None, :], out=w)\n innerprod = np.einsum('ijk,ik->jk', V, w)\n\n V[:, i + 1, :] = w\n\n if (np.abs(beta) > 1e-2).sum() == 0 or not reortho:\n break\n return T, V", "def eigenpairs(mat):\n\n\tdef eigenvals(mat):\n\t\tqs, mat = jf.triangularize(mat)\n\t\treturn [mat[i][i] for i in range(len(mat))]\n\n\tvals = eigenvals(mat)\n\tprint(vals, flush = True)\n\tvecs = []\n\tfor eigenval in vals:\n\n\t\tmat_copy = [row[:] for row in mat]\n\t\tfor i in range(len(mat_copy)):\n\t\t\tmat_copy[i][i] 
-= eigenval\n\t\tkern = vm.numerical_kernel(mat_copy)\n\t\tvecs.append(kern)\n\t\tprint(kern, flush = True)\n\treturn vals, vecs", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, 
diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def KS_matsolve_serial(\n self, T_sparse, B_sparse, v, xgrid, bc, solve_type, eigs_min_guess\n ):\n if solve_type == \"guess\":\n dtype = np.float64\n else:\n dtype = self.fp\n # compute the number of grid points\n N = np.size(xgrid)\n\n # initialize the eigenfunctions and their eigenvalues\n eigfuncs = np.zeros((config.spindims, config.lmax, config.nmax, N), dtype=dtype)\n eigvals = np.zeros((config.spindims, config.lmax, config.nmax), dtype=dtype)\n eigs_guess = np.zeros((config.spindims, config.lmax), dtype=dtype)\n\n # A new Hamiltonian has to be re-constructed for every value of l and each spin\n # channel if spin-polarized\n for l in range(config.lmax):\n # diagonalize Hamiltonian using scipy\n for i in range(np.shape(v)[0]):\n # fill potential matrices\n if self.grid_type == \"log\":\n v_corr = 0.5 * (l + 0.5) ** 2 * np.exp(-2 * xgrid)\n else:\n v_corr = 3 / (32 * xgrid**4) + l * (l + 1) / (2 * xgrid**4)\n V_mat_sparse = diags([v[i] + v_corr], offsets=[0], dtype=dtype)\n\n # construct Hamiltonians\n H_sparse = T_sparse + B_sparse @ V_mat_sparse\n\n # if dirichlet solve on (N-1) x (N-1) grid\n if bc == \"dirichlet\":\n H_sparse_s = self.mat_convert_dirichlet(H_sparse)\n B_sparse_s = self.mat_convert_dirichlet(B_sparse)\n # if neumann don't change anything\n elif bc == \"neumann\":\n H_sparse_s = H_sparse\n B_sparse_s = B_sparse\n\n # we seek the lowest nmax eigenvalues from sparse matrix diagonalization\n # use 'shift-invert mode' to find the eigenvalues nearest in magnitude\n # to the est. 
lowest eigenvalue from full diagonalization on coarse grid\n if solve_type == \"full\":\n eigs_up, vecs_up = eigs(\n H_sparse_s,\n k=config.nmax,\n M=B_sparse_s,\n which=\"LM\",\n sigma=eigs_min_guess[i, l],\n tol=config.conv_params[\"eigtol\"],\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=np.ComplexWarning)\n vecs_up = vecs_up.astype(self.fp)\n eigs_up = eigs_up.astype(self.fp)\n\n K = np.zeros((N, config.nmax), dtype=dtype)\n if self.grid_type == \"log\":\n prefac = -2 * np.exp(2 * xgrid)\n else:\n prefac = 8 * xgrid**2\n for n in range(config.nmax):\n K[:, n] = prefac * (v[i] + v_corr - eigs_up.real[n])\n\n eigfuncs[i, l], eigvals[i, l] = self.update_orbs(\n vecs_up, eigs_up, xgrid, bc, K, self.grid_type\n )\n\n elif solve_type == \"guess\":\n # estimate the lowest eigenvalues for a given value of l\n B_dense = B_sparse.todense()\n invB = linalg.inv(B_dense)\n eigs_up = linalg.eigvals(\n invB * H_sparse.todense(), check_finite=False\n )\n if not np.all(np.isclose(invB * B_dense, np.eye(len(xgrid)))):\n print(\"Warning: B matrix in eigs_guess is ill-conditioned\")\n\n # sort the eigenvalues to find the lowest\n idr = np.argsort(eigs_up)\n eigs_guess[i, l] = np.array(eigs_up[idr].real, dtype=dtype)[0]\n\n # dummy variable for the null eigenfucntions\n eigfuncs_null = eigfuncs\n\n if solve_type == \"full\":\n return eigfuncs, eigvals\n else:\n return eigfuncs_null, eigs_guess", "def _initialise_k_vec( self ):\n return self._initialise_v_vec( 'k' )", "def get_vf_matrix(self, geom_dict, view_matrix, obstr_matrix, list_pvrow):\n n_all_surfaces = view_matrix.shape[0]\n view_factors = np.zeros((n_all_surfaces, n_all_surfaces), dtype=float)\n\n # --- First deal with finite surfaces from the registry, and treat only\n # half of the views because symmetry will be used next\n n_finite_surfaces = n_all_surfaces - 1 # no sky\n view_matrix_upper_finite_surfaces = np.triu(\n view_matrix[:n_finite_surfaces, :n_finite_surfaces])\n indices_views_finite = np.where(view_matrix_upper_finite_surfaces)\n\n n_views = len(indices_views_finite[0])\n geometries = list(geom_dict.values())\n for i in range(n_views):\n idx = (indices_views_finite[0][i], indices_views_finite[1][i])\n view = self.mapper.reverse_view[view_matrix[idx]]\n line_i = geometries[idx[0]]\n line_j = geometries[idx[1]]\n obstr_index = obstr_matrix[idx]\n if obstr_index is not None:\n obstructing_pvrow = list_pvrow[obstr_matrix[idx]]\n else:\n obstructing_pvrow = None\n # The following line takes the most time to execute (looped)\n view_factors[idx] = self.mapper.function_mapping[view](\n line_i, line_j, obstructing_pvrow)\n\n # Use the reciprocity property of view factors to speed up the\n # vfactor calculation: A_1 * F_1-2 = A_2 * F_2-1 ==> symmetric matrx\n areas = np.array([surf.length for surf in geometries])\n matrix_areas = np.diag(areas)\n matrix_areas_inv = np.diag(1. / areas)\n\n upper_matrix_reciprocity = np.dot(matrix_areas,\n view_factors[:n_finite_surfaces,\n :n_finite_surfaces])\n\n total_matrix_reciprocity = (upper_matrix_reciprocity +\n upper_matrix_reciprocity.T)\n finite_vf_matrix = np.dot(matrix_areas_inv, total_matrix_reciprocity)\n view_factors[:n_finite_surfaces, :n_finite_surfaces] = finite_vf_matrix\n\n # --- Then do the calculations for the sky, which is the remaining\n # portion of the hemisphere\n view_factors[:-1, -1] = 1. 
- np.sum(view_factors[:-1, :-1], axis=1)\n return view_factors", "def minkowskiArrayDot(X, vec):\n MDP_max = -(1 + 1e-10)\n k = X.shape[1]\n vec = vec.reshape((k, -1))\n mod = np.ones(vec.shape)\n mod[-1] = -1\n MDP = np.matmul(X, vec*mod)\n #MDP[MDP > MDP_max] = MDP_max\n return MDP", "def LM(m, v):\n\n lm = np.array([0.0, 0.0])\n natoms = int(len(v) / 2)\n for i in range(0, natoms):\n x = m[2 * i][2 * i] * vector(v, i)\n lm += x\n\n return (lm)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implicitly restarted Arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec` by alternating between compression and re-expansion of an initial `num_krylov_vecs`-step Arnoldi factorization.
def implicitly_restarted_arnoldi_method(
    matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,
    res_thresh) -> Tuple[List[Tensor], List[Tensor]]:
  N = np.prod(initial_state.shape)
  p = num_krylov_vecs - numeig
  num_krylov_vecs = np.min([num_krylov_vecs, N])
  if (p <= 1) and (num_krylov_vecs < N):
    raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <"
                     f" `num_krylov_vecs` <= N={N},"
                     f" `num_krylov_vecs`={num_krylov_vecs}")
  dtype = initial_state.dtype
  # initialize arrays
  krylov_vectors = jax.numpy.zeros(
      (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),
      dtype=dtype)
  H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)
  # perform initial arnoldi factorization
  Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args, initial_state,
                                                   krylov_vectors, H, 0,
                                                   num_krylov_vecs, eps)
  # obtain an m-step arnoldi factorization
  Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)
  it = 0
  if which == 'LR':
    _which = 0
  elif which == 'LM':
    _which = 1
  else:
    raise ValueError(f"which = {which} not implemented")
  # make sure the dtypes are matching
  if maxiter > 0:
    if Vm.dtype == np.float64:
      dtype = np.complex128
    elif Vm.dtype == np.float32:
      dtype = np.complex64
    elif Vm.dtype == np.complex128:
      dtype = Vm.dtype
    elif Vm.dtype == np.complex64:
      dtype = Vm.dtype
    else:
      raise TypeError(f'dtype {Vm.dtype} not supported')
    Vm = Vm.astype(dtype)
    Hm = Hm.astype(dtype)
    fm = fm.astype(dtype)
  while (it < maxiter) and (not converged):
    evals, _ = jax.numpy.linalg.eig(Hm)
    krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,
                                                  p, _which, res_thresh)
    if converged:
      break
    v0 = jax.numpy.reshape(fk, initial_state.shape)
    # restart
    Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,
                                                krylov_vectors, H, numeig,
                                                num_krylov_vecs, eps)
    Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)
    it += 1
  ev_, U_ = np.linalg.eig(np.array(Hm))
  eigvals = jax.numpy.array(ev_)
  U = jax.numpy.array(U_)
  _, inds = LR_sort(eigvals, _which)
  vectors = get_vectors(Vm, U, inds, numeig)
  return eigvals[inds[0:numeig]], [
      jax.numpy.reshape(vectors[n, :], initial_state.shape)
      for n in range(numeig)
  ]
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. 
The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def eigenbasis(self, mat, backward=False):\n if backward:\n mat[:] = multi_dot([self.evecs, mat, self.evecs.transpose()])\n else:\n mat[:] = multi_dot([self.evecs.transpose(), mat, self.evecs])", "def poweig(A, x0, maxiter = 100, ztol= 1.0e-5, mode= 0, teststeps=1):\n m = len(A)\n xi = x0[:] \n \n for n in range(maxiter):\n # matrix vector multiplication.\n xim1 = xi[:]\n for i in range(m):\n xi[i] = 0.0\n for j in range(m):\n xi[i] += A[i][j] * xim1[j]\n print n, xi\n if mode == 0:\n vlen = sqrt(sum([xi[k]**2 for k in range(m)]))\n xi = [xi[k] /vlen for k in range(m)]\n elif mode == 1:\n for k in range(m-1, -1, -1):\n c = abs(xi[k])\n if c > 1.0e-5:\n xi = [xi[k] /c for k in range(m)]\n break\n # early termination test.\n if n % teststeps == 0:\n S = sum([xi[k]-xim1[k] for k in range(m)])\n if abs(S) < ztol:\n break\n #print n, xi\n # Compute Rayleigh quotient.\n numer = sum([xi[k] * xim1[k] for k in range(m)])\n denom = sum([xim1[k]**2 for k in range(m)])\n xlambda = numer/denom\n return xlambda, xi", "def _reset_k_vec( self ):\n self._reset_v_vec( 'k' )", "def find_vector(mat):\r\n eig = 
np.linalg.eig([[mat[1, 1], mat[1, 2]], [mat[2, 1], mat[2, 2]]])\r\n\r\n minimum = eig[0][0]\r\n index = 0\r\n for i in range(1, 2):\r\n if eig[0][i] < minimum:\r\n minimum = eig[0][i]\r\n index = i\r\n\r\n n = [0, eig[1][0][index], eig[1][1][index]]\r\n n = n / np.linalg.norm(n)\r\n\r\n return n", "def calc_eigendecomp(self):\n self.evals, self.evecs = np.linalg.eigh(self.sub_matrix)", "def truncated_svd(A,k=None):", "def eigenpairs(mat):\n\n\tdef eigenvals(mat):\n\t\tqs, mat = jf.triangularize(mat)\n\t\treturn [mat[i][i] for i in range(len(mat))]\n\n\tvals = eigenvals(mat)\n\tprint(vals, flush = True)\n\tvecs = []\n\tfor eigenval in vals:\n\n\t\tmat_copy = [row[:] for row in mat]\n\t\tfor i in range(len(mat_copy)):\n\t\t\tmat_copy[i][i] -= eigenval\n\t\tkern = vm.numerical_kernel(mat_copy)\n\t\tvecs.append(kern)\n\t\tprint(kern, flush = True)\n\treturn vals, vecs", "def eig_faces(u_mat, nmode, dim):\n n = int(nmode)\n nparray = np.zeros(np.size(u_mat[:,0]))\n for i in range(n):\n nparray = nparray + u_mat[:,i]\n \n nparray = np.reshape(nparray,dim)\n return(nparray)", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def reorder_eigenvalues(self):\n #vector 
transformations\n vectors = self.eigenvectors.view(complex).reshape((self.nqpoints, self.nphons, self.nphons))\n \n eig = np.zeros([self.nqpoints,self.nphons])\n eiv = np.zeros([self.nqpoints,self.nphons,self.nphons],dtype=complex)\n #set order at gamma\n order = range(self.nphons)\n eig[0] = self.eigenvalues[0]\n eiv[0] = vectors[0]\n for k in xrange(1,self.nqpoints):\n order = estimate_band_connection(vectors[k-1].T,vectors[k].T,order)\n for n,i in enumerate(order):\n eig[k,n] = self.eigenvalues[k,i]\n eiv[k,n] = vectors[k,i]\n \n #update teh eigenvales with the ordered version\n self.eigenvalues = eig\n self.eigenvectors = eiv.view(float).reshape((self.nqpoints,self.nphons,self.natoms,3,2))", "def _eigenspectrum_lc(self, eigvecs_flag=False):\n if not eigvecs_flag:\n if self._eigvals is None:\n H_lc = self._hamiltonian_lc()\n self._eigvals = H_lc.eigenenergies()\n return self._eigvals\n else:\n if self._eigvals is None or self._eigvecs is None:\n H_lc = self._hamiltonian_lc()\n self._eigvals, self._eigvecs = H_lc.eigenstates()\n return self._eigvals, self._eigvecs", "def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):\n if not np.iscomplex(dt):\n dt = dt.real\n\n # normalize starting vector\n vstart = xp.asarray(vstart)\n nrmv = float(xp.linalg.norm(vstart))\n assert nrmv > 0\n vstart = vstart / nrmv\n\n alpha = np.zeros(block_size)\n beta = np.zeros(block_size - 1)\n\n V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)\n V[0] = vstart\n res = None\n\n\n for j in range(len(vstart)):\n \n w = Afunc(V[j])\n alpha[j] = xp.vdot(w, V[j]).real\n\n if j == len(vstart)-1:\n #logger.debug(\"the krylov subspace is equal to the full space\")\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n \n if len(V) == j+1:\n V, old_V = xp.empty((len(V) + block_size, len(vstart)), dtype=vstart.dtype), V\n V[:len(old_V)] = old_V\n del old_V\n alpha = np.concatenate([alpha, np.zeros(block_size)])\n beta = np.concatenate([beta, np.zeros(block_size)])\n\n w -= alpha[j]*V[j] + (beta[j-1]*V[j-1] if j > 0 else 0)\n beta[j] = xp.linalg.norm(w)\n if beta[j] < 100*len(vstart)*np.finfo(float).eps:\n # logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')\n return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt), j+1\n\n if 3 < j and j % 2 == 0:\n new_res = _expm_krylov(alpha[:j+1], beta[:j], V[:j+1].T, nrmv, dt)\n if res is not None and xp.allclose(res, new_res):\n return new_res, j+1\n else:\n res = new_res\n V[j + 1] = w / beta[j]", "def eigs(self,num_eigvals,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num)\n eigvals, eigvecs = eigsh(ham,k=num_eigvals*num_sites,which='SM')\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such 
that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(num_eigvals):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def eigenvector_centrality(self, iterations = 100):\r\n vectorSet = {}\r\n for key, value in self.adj_list.items():\r\n vectorSet[key] = 1 # init vectorSet of everything = 1\r\n\r\n for i in range(iterations):\r\n for sender in self.adj_list:\r\n for r in sender:\r\n vectorSet[sender] = vectorSet[sender] + 1\r\n\r\n sum = 0\r\n for key, value in vectorSet.items():\r\n sum += value\r\n \r\n for key, value in vectorSet.items():\r\n vectorSet[key] = vectorSet[key] / sum\r\n\r\n return vectorSet" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
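Illustrative sketch (not taken from the dataset above): the row describes restarting an m-step Arnoldi factorization, i.e. building A V_k ≈ V_k H_k + f e_k^T and compressing it around the wanted Ritz values. The helper name `arnoldi_sketch`, the test matrix, and all sizes below are invented for illustration; it is a plain-NumPy sketch of the factorization only, not of the restart logic.

import numpy as np

def arnoldi_sketch(A, v0, k):
    # Build V (n x (k+1)) with orthonormal columns and upper-Hessenberg H ((k+1) x k)
    # satisfying A @ V[:, :k] = V[:, :k] @ H[:k, :k] + H[k, k-1] * outer(V[:, k], e_k).
    n = v0.size
    V = np.zeros((n, k + 1))
    H = np.zeros((k + 1, k))
    V[:, 0] = v0 / np.linalg.norm(v0)
    for j in range(k):
        w = A @ V[:, j]
        for i in range(j + 1):              # modified Gram-Schmidt against earlier vectors
            H[i, j] = V[:, i] @ w
            w = w - H[i, j] * V[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:             # breakdown: an invariant subspace was found
            return V[:, :j + 1], H[:j + 1, :j + 1]
        V[:, j + 1] = w / H[j + 1, j]
    return V, H

rng = np.random.default_rng(0)
A = rng.standard_normal((40, 40))           # made-up test matrix
V, H = arnoldi_sketch(A, rng.standard_normal(40), 10)
# The eigenvalues (Ritz values) of H[:k, :k] approximate extremal eigenvalues of A;
# the restarted method keeps refining them between compressions and re-expansions.
print(np.linalg.eigvals(H[:10, :10])[:3])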
Solve A x = b for x using the m-restarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(A_args), gmres_m solves Ax = b, where x and b are length-n vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M).
def gmres_m(A_mv: Callable, A_args: Sequence,
            b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,
            atol: float, num_krylov_vectors: int,
            maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:
  num_krylov_vectors = min(num_krylov_vectors, b.size)
  x = x0
  b_norm = jnp.linalg.norm(b)
  tol = max(tol * b_norm, atol)
  for n_iter in range(maxiter):
    done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,
                          b_norm)
    if done:
      break
  return x, beta, n_iter, done
[ "def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]", "def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta", "def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return", "def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)", "def linear_least_squares(a, b, residuals=False):\n if type(a) != np.ndarray or not a.flags[\"C_CONTIGUOUS\"]:\n main_warning(\n \"Matrix a is not a C-contiguous numpy array. 
The solver will create a copy, which will result\"\n + \" in increased memory usage.\"\n )\n\n a = np.asarray(a, order=\"c\")\n i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True)\n x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b))\n\n if residuals:\n return x, np.linalg.norm(np.dot(a, x) - b)\n else:\n return x", "def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x", "def run_global_optimization(a, m, nhp):\n # circuit hyperparameters\n clip_size = 1\n cutoff = 15\n\n # generate the initial parameters\n bound = [clip_size, np.pi]*4+[np.pi]*2\n x0 = map(init, bound)\n\n # perform the optimization\n minimizer_kwargs = {\"method\": \"SLSQP\", \"args\": (a, m, cutoff)} # SLSQP L-BFGS-B\n print(\"hopping....\")\n\n res = basinhopping(loss, list(x0), minimizer_kwargs=minimizer_kwargs, niter=nhp)\n\n #print the final restuls\n x_f = res.x\n fidelity, prob, _, _, _ = circuit(x_f, a, m, cutoff)\n print(\"final fid {}, prob {}\".format(fidelity, prob))\n return res.x, fidelity, prob", "def _gmres(self, super_operator, super_rhs, tol):\n return login_gmres(\n super_operator, super_rhs, tol,\n return_residuals=True,\n **SOLVER_OPTIONS\n )", "def solve(self, b, get_resid=True):\n self.context.ma27(b, self.x, self.residual, get_resid)\n return None", "def regularised_least_squares_regression(reg_param, matrix, rhs, support = None):\n sol = np.zeros(matrix.shape[1])\n lsmr_solver = LSMRFramework(matrix[:, support])\n lsmr_solver.solve(rhs, damp=reg_param)\n sol[support] = lsmr_solver.x\n return sol", "def linear_problem(A, b, eps=0.0001):\n\n M = A + eps * speye_like(A)\n return torch.solve(b if b.ndimension() >= 2 else torch.unsqueeze(b, -1),\n M if isdense(M) else M.to_dense())[0].squeeze()", "def spsolve_cpu(A, b):\n def callback(data, indices, indptr, _b):\n _A = csc_matrix((data, indices, indptr))\n return spsolve_scipy(_A, _b)\n\n xk = jax.pure_callback(callback, # callback function\n b, # return type is b\n A.data, # callback function arguments from here on\n A.indices,\n A.indptr,\n b)\n return xk", "def regularization(lam, m, theta) :\n temp = lam/(2.0*m)\n temp *= (numpy.dot(theta.T, theta)-theta[0][0]**2)\n return temp", "def optGM(objective_function: \"function\",\n set_of_mols_par: SetOfMolecules,\n subset_of_mols: SetOfMolecules,\n min_subset_of_mols: SetOfMolecules,\n chg_method: ChargeMethod,\n num_of_samples: int,\n num_of_candidates: int) -> namedtuple:\n\n print(\" Sampling...\")\n samples = lhs(num_of_samples, chg_method.params_bounds)\n\n print(\" Calculating of objective function for samples...\")\n samples_rmsd = [objective_function(sample, chg_method, min_subset_of_mols) for sample in samples]\n\n print(\"\\x1b[2K Selecting candidates...\")\n best_samples = samples[list(map(samples_rmsd.index, nsmallest(num_of_candidates * 100, samples_rmsd)))]\n best_samples_rmsd = [objective_function(sample, chg_method, set_of_mols_par) for sample in best_samples]\n candidates = best_samples[list(map(best_samples_rmsd.index, nsmallest(num_of_candidates, best_samples_rmsd)))]\n\n print(\"\\x1b[2K Local minimizating...\")\n all_loc_min_course = []\n opt_candidates = []\n for params in candidates:\n opt_params, _, loc_min_course = 
local_minimization(objective_function, subset_of_mols, chg_method, params)\n all_loc_min_course.append(loc_min_course[0])\n opt_candidates.append(opt_params)\n\n opt_candidates_rmsd = [objective_function(candidate, chg_method, set_of_mols_par) for candidate in opt_candidates]\n final_candidate_obj_val = nsmallest(1, opt_candidates_rmsd)\n final_candidate_index = opt_candidates_rmsd.index(final_candidate_obj_val)\n final_candidate = opt_candidates[final_candidate_index]\n\n print(\"\\x1b[2K Final local minimizating...\")\n final_params, final_obj_val, loc_min_course = local_minimization(objective_function, set_of_mols_par, chg_method, final_candidate)\n all_loc_min_course[final_candidate_index].extend(loc_min_course[0])\n\n return namedtuple(\"chgs\", [\"params\",\n \"obj_val\",\n \"loc_min_courses\"])(final_params,\n final_obj_val,\n all_loc_min_course)", "def tensorflow_optimization(m):\n\n fusing.fuse_Transpose_into_Constant(m.graph)\n fusing.fuse_MatMul_and_Add_into_Gemm(m.graph)\n other.topological_sort(m.graph)\n\n m = other.polish_model(m)\n\n # constant folding\n replacing.replace_shape_with_constant(m.graph)\n\n # constant_folding\n m = other.inference_shapes(m)\n while constant_folding.constant_folding(m.graph):\n logging.debug(\"After constant folding jobs.\")\n other.topological_sort(m.graph)\n while len(m.graph.value_info) != 0:\n m.graph.value_info.pop()\n\n m = other.inference_shapes(m)\n replacing.replace_shape_with_constant(m.graph)\n other.topological_sort(m.graph)\n m = tf_pattern_match(m)\n m = optimizer.optimize(m, [\"eliminate_deadend\"])\n\n eliminating.eliminate_consecutive_reshape(m.graph)\n eliminating.eliminate_Squeeze_before_Reshape(m.graph)\n other.topological_sort(m.graph)\n return m", "def gemv(self,transa_,m_,n_,alpha_,a_,x_,beta_,y_):\n _a_minlength = ((n_) * (m_))\n if ((n_) * (m_)) > 0 and a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((n_) * (m_))))\n if a_ is None:\n raise ValueError(\"Argument a cannot be None\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n _x_minlength = __tmp_var_0\n if __tmp_var_0 > 0 and x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x is not long enough: Is %d, expected %d\" % (len(x_),__tmp_var_0))\n if x_ is None:\n raise ValueError(\"Argument x cannot be None\")\n if x_ is None:\n raise ValueError(\"Argument x may not be None\")\n if isinstance(x_, numpy.ndarray) and x_.dtype is numpy.dtype(numpy.float64) and x_.flags.contiguous:\n _x_copyarray = False\n _x_tmp = ctypes.cast(x_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif x_ is not None:\n _x_copyarray = True\n _x_np_tmp = numpy.zeros(len(x_),numpy.dtype(numpy.float64))\n _x_np_tmp[:] = x_\n assert _x_np_tmp.flags.contiguous\n _x_tmp = ctypes.cast(_x_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _x_copyarray = 
False\n _x_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n _y_minlength = __tmp_var_1\n if __tmp_var_1 > 0 and y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),__tmp_var_1))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if y_ is None:\n raise ValueError(\"Argument y may not be None\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n res = __library__.MSK_XX_gemv(self.__nativep,transa_,m_,n_,alpha_,_a_tmp,_x_tmp,beta_,_y_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _y_copyarray:\n y_[:] = _y_np_tmp", "def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):\n x = g / np.linalg.norm(g) * d_max\n for _ in range(max_iter):\n # gradient ascent\n x = x + stepsize * (M @ x + g)\n # projection to sphere\n x = x / np.linalg.norm(x) * d_max\n ## debug\n #loss = 0.5 * x.T @ M @ x + g.T @ x\n #print(f'Loss: {loss}')\n return x", "def solve(mm):\n model = mm.model\n model.optimize()\n\n\n mm.optimal = model.status\n mm.take_snapshot()\n print \"\\nSnapshot saved as {}\".format(mm.filename)\n mm.solve_count += 1\n mm.update_filename()\n\n if model.status == gp.GRB.OPTIMAL:\n # Write a csv of the solution data\n write_solution(mm)\n\n\n return True", "def least_squares(A, b):\n Q,R = la.qr(A,mode='economic')\n return la.solve(R,Q.T@b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
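Hedged usage sketch of the restarted-GMRES idea described in the row above: the snippet solves a small made-up system with SciPy's gmres, passing an explicit restart length (the analogue of num_krylov_vectors) and an iteration cap. The LinearOperator wrapper only loosely mirrors the A_mv(x, *A_args) convention, since SciPy's matvec takes the vector alone; matrix, sizes, and tolerances are invented for illustration.

import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator

n = 50
rng = np.random.default_rng(0)
A = np.eye(n) + 0.1 * rng.standard_normal((n, n))   # well-conditioned toy matrix
b = rng.standard_normal(n)

# Wrap the matrix-vector product so GMRES never needs A explicitly.
A_mv = LinearOperator((n, n), matvec=lambda x: A @ x)

x, info = gmres(A_mv, b, x0=np.zeros(n), restart=20, maxiter=100, atol=1e-8)
print(info, np.linalg.norm(A @ x - b))   # info == 0 signals convergence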
Computes the residual vector r and its norm, beta, which is minimized by GMRES.
def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,
                   x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:
  r = b - A_mv(x, *A_args)
  beta = jnp.linalg.norm(r)
  return r, beta
[ "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2", "def update_residual(self):\n mm = _utils.matmul\n self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E", "def residuals(p, r, theta):\n return r - f(theta, p)", "def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y", "def probaR(self, r):\n\n if r == 0.:\n return self.__alpha0 + self.__beta + self.__eta / 2.\n\n if r == 1.:\n return self.__alpha1 + self.__beta + self.__eta / 2.\n\n return self.__eta * (3./2. + r - r*r)", "def m_beta(r, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r_x / r_c)**2))\n\n m = 4./3 * np.pi * rho_0 * r**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r/r_c)**2)\n\n return m", "def __residuals__(self):\n\t\tself.fitted = np.dot(self.X, self.coef)\n\t\tself.residuals = self.Y - self.fitted \n\t\tself.got_residuals = True", "def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)", "def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c", "def rms(self, x: 'vnl_vectorD') -> \"double\":\n return _vnl_least_squares_functionPython.vnl_least_squares_function_rms(self, x)", "def residual(params, data):\n alpha = params[\"alpha\"].value\n # note that alpha = mu / (k_B * T)\n model = mathematics.Fermi_integral(alpha, 0.5)\n complexResidue = abs(data - model)\n return complexResidue # noqa: RET504", "def residual_distribution(self):\n norm = stats.norm\n mu, std = norm.fit(self.residuals)\n # mu = 0 # By definition, mu of resids = 0, but the fit provides approximately 0. 
It's perhaps best to just\n # set mu=0?\n return norm(loc=mu, scale=std)", "def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y", "def residuals(self, ts, rvs, p):\n\n if p.npl == 0:\n return rvs\n else:\n rvmodel = np.sum(rv.rv_model(ts,p), axis=0)\n return rvs - rvmodel", "def make_residuals_vector(self,res_rings=[1,2,3]):\n F = np.zeros(28) # largest number of parameters\n\n # select observations according to ring number \n # (specify outermost ring above)\n self.set_fit_indices(res_rings)\n\n H = np.zeros((self.fit_nobs,self.npar))\n\n self.X = np.zeros(self.fit_nobs)\n self.Y = np.zeros(self.fit_nobs)\n self.Phase = np.zeros(self.fit_nobs)\n self.D = np.zeros(self.fit_nobs)\n self.ID = np.zeros(self.fit_nobs,dtype='int')\n self.RING = np.zeros(self.fit_nobs,dtype='int')\n self.SEG = np.zeros(self.fit_nobs,dtype='int')\n\n ipt = 0\n for i in self.fit_indices:\n self.X[ipt] = self.XVec[i,0]\n self.Y[ipt] = self.XVec[i,1]\n self.Phase[ipt] = 1./(1.+(self.X[ipt]**2+self.Y[ipt]**2)/4./17500.**2)\n self.D[ipt] = self.set_basis_data(i)\n self.ID[ipt] = self.id[i]\n self.RING[ipt] = self.ring[i]\n self.SEG[ipt] = self.seg[i]\n\n self.set_basis_functions(i,F)\n for j in range(self.npar):\n H[ipt,j] = F[j]\n ipt = ipt + 1\n\n self.RESIDUALS = self.D - np.dot(H,self.P)", "def calc_residuals(self, parameters, fjac=None):\n \n residuals=self.curve-(parameters[0]*self.convolution_w_exp(parameters[1]))\n self.residuals=residuals\n status=0\n #return squared sum of res.\n return ([status,residuals])", "def residual_G2D_norotation(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(y-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn resid.flatten()", "def residual(self,name):\n state = self.getstate(name)\n m = self.hit.vec \n x = state.vec\n res = m - self.hmatrix*x\n debug('kfnode.residual',(name,res))\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
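Small, self-contained numerical illustration (not from the dataset) of what the residual computation above produces: r = b - A x and beta = ||r||, compared against a tolerance scaled by ||b||, mirroring the tol = max(tol * b_norm, atol) convention of the restarted loop. The matrix, perturbation size, and tolerance are made up.

import numpy as np

rng = np.random.default_rng(1)
A = np.eye(30) + 0.05 * rng.standard_normal((30, 30))
b = rng.standard_normal(30)
x = np.linalg.solve(A, b) + 1e-6 * rng.standard_normal(30)  # perturbed exact solution

r = b - A @ x                    # residual vector
beta = np.linalg.norm(r)         # the quantity GMRES drives below tol
tol = 1e-5 * np.linalg.norm(b)
print(beta, beta <= tol)         # True once the iterate is accurate enough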
Performs a single iteration of gmres_krylov. See that function for a more detailed description.
def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:
  gmres_variables, gmres_constants = gmres_carry
  k, V, R, beta_vec, err, givens = gmres_variables
  tol, A_mv, A_args, b_norm, _ = gmres_constants

  V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)
  R_col, givens = apply_givens_rotation(H[:, k], givens, k)
  R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])

  # Update the residual vector.
  cs, sn = givens[:, k] * beta_vec[k]
  beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)
  beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)
  err = jnp.abs(sn) / b_norm
  gmres_variables = (k + 1, V, R, beta_vec, err, givens)
  return (gmres_variables, gmres_constants)
[ "def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done", "def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return", "def evaluate_keypoint_net_SP(ori_dir,warp_dir,sess,top_k,input_img_tensor,output_prob_nms_tensor,output_desc_tensors,conf_threshold ):\n # keypoint_net.eval()\n # keypoint_net.training = False\n\n\n localization_err, repeatability = [], []\n correctness1, correctness3, correctness5, MScore = [], [], [], []\n\n\n image_paths = []\n warped_image_paths = []\n file_ext = \".png\"\n\n folder_paths = [x for x in Path(warp_dir).iterdir() if x.is_dir()]\n for trans_dir_all in folder_paths:\n ori_name = str(trans_dir_all).split(\"/\")[-1]\n img1 = preprocess_image(ori_dir+ori_name+file_ext)\n # pridict!\n out1 = sess.run([output_prob_nms_tensor, output_desc_tensors],\n feed_dict={input_img_tensor: np.expand_dims(img1, 0)})\n\n\n\n keypoint_map1 = np.squeeze(out1[0])\n descriptor_map1 = np.squeeze(out1[1])\n\n kp1, des1 = extract_superpoint_keypoints_and_descriptors(\n keypoint_map1, descriptor_map1, top_k,confidence=conf_threshold)\n\n ori_img = cv2.imread(ori_dir+ori_name+file_ext)\n outimg = ori_img.copy()\n cv2.drawKeypoints(ori_img,kp1,outimg)\n # cv2.imshow(\"\",outimg)\n # cv2.waitKey(0)\n\n\n\n\n for trans_dir_sig in trans_dir_all.iterdir():\n H_dir = np.load(str(trans_dir_sig)+\"/GT_H_mat.npz\")\n num_images = 6 # for each kind of trans we have 6\n for trans_name in [f for f in os.listdir(trans_dir_sig) if f.endswith('.png')]:\n # print(\"now its the 
\",str(trans_dir_sig)+\"/\"+trans_name+file_ext)\n trans_name = trans_name[:-4]\n # image_paths.append(ori_dir+ori_name+file_ext)\n # warp_path = str(trans_dir_sig)+\"/\"+trans_name+file_ext\n H = H_dir[trans_name]\n img2 = preprocess_image(str(trans_dir_sig)+\"/\"+trans_name+file_ext)\n shape = img2.shape\n distance_thresh = 3\n out2 = sess.run([output_prob_nms_tensor, output_desc_tensors],\n feed_dict={input_img_tensor: np.expand_dims(img2, 0)})\n keypoint_map2 = np.squeeze(out2[0])\n descriptor_map2 = np.squeeze(out2[1])\n# des2 300*256\n kp2, des2 = extract_superpoint_keypoints_and_descriptors(\n keypoint_map2, descriptor_map2, top_k,confidence=conf_threshold) # for leared feature, the conf and top k are already filtered here\n\n\n ori_img2 = cv2.imread(str(trans_dir_sig)+\"/\"+trans_name+file_ext)\n outimg2 = ori_img2.copy()\n cv2.drawKeypoints(ori_img2,kp2,outimg2)\n # cv2.imshow(\"\",np.concatenate((outimg,outimg2),axis = 1))\n # cv2.waitKey(0)\n #kp1 kp2 should be fixed\n\n N1, N2, rep, loc_err= compute_repeatability(kp1,kp2,H,shape,distance_thresh = 3)\n\n\n\n repeatability.append(rep)\n localization_err.append(loc_err)\n\n # Compute correctness\n # c1, c2, c3 = compute_homography(data, keep_k_points=top_k)\n c1, c2, c3 = compute_homography(H,shape,kp1,kp2,des1,des2)\n correctness1.append(c1)\n correctness3.append(c2)\n correctness5.append(c3)\n\n # Compute matching score\n mscore = compute_matching_score(H,shape,kp1,kp2,des1,des2)\n MScore.append(mscore)\n\n return np.nanmean(repeatability), np.nanmean(localization_err),\\\n np.nanmean(correctness1), np.nanmean(correctness3), np.nanmean(correctness5),np.nanmean(MScore)", "def evaluate(self, results, res_folder=None, metric='PCK', **kwargs):\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'tPCK']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = []\n\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n # convert 0-based index to 1-based index,\n # and get the first two dimensions.\n preds[..., :2] += 1.0\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n kpts.append({\n 'keypoints': preds[i],\n 'center': boxes[i][0:2],\n 'scale': boxes[i][2:4],\n 'area': boxes[i][4],\n 'score': boxes[i][5],\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value", "def main(logger, resultsDict):\n\n print(\"=\" * 30)\n print(\"Main function of overlayMasks.\")\n print(\"=\" * 30)\n\n # Get parameters from .json files.\n full_img_dir = config_overlay[\"full_img_dir\"]\n y_true_dir = config_overlay[\"y_true_dir\"]\n y_pred_dir = config_overlay[\"y_pred_dir\"]\n extension = config_overlay[\"extension\"]\n target_size = (config_overlay[\"target_size\"], config_overlay[\"target_size\"])\n save_maskoverlay_dir = config_overlay[\"save_maskoverlay_dir\"]\n save_fulloverlay_dir = 
config_overlay[\"save_fulloverlay_dir\"]\n\n # ------------\n\n # Get paths.\n full_img_paths_list = []\n y_true_paths_list = []\n y_pred_paths_list = []\n\n for full in os.listdir(full_img_dir):\n if full.endswith(extension):\n full_img_paths_list.append(os.path.join(full_img_dir, full))\n\n for full in os.listdir(y_true_dir):\n if full.endswith(extension):\n y_true_paths_list.append(os.path.join(y_true_dir, full))\n\n for full in os.listdir(y_pred_dir):\n if full.endswith(extension):\n y_pred_paths_list.append(os.path.join(y_pred_dir, full))\n\n full_img_paths_list.sort()\n y_true_paths_list.sort()\n y_pred_paths_list.sort()\n\n # ------------\n\n # Load full_img.\n full_img_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in full_img_paths_list\n ]\n\n # Load y_true masks.\n y_true_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_true_paths_list\n ]\n\n # Load y_pred masks.\n y_pred_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_pred_paths_list\n ]\n\n print(full_img_arrays[0].min(), full_img_arrays[0].max())\n print(y_true_arrays[0].min(), y_true_arrays[0].max())\n print(y_pred_arrays[0].min(), y_pred_arrays[0].max())\n\n # ------------\n\n # Stack to create RGB version of grayscale images.\n full_img_rgb = [np.stack([img, img, img], axis=-1) for img in full_img_arrays]\n\n # Green true mask. Note OpenCV uses BGR.\n y_true_rgb = [\n np.stack([np.zeros_like(img), img, np.zeros_like(img)], axis=-1)\n for img in y_true_arrays\n ]\n\n # Red predicted mask. Note OpenCV uses BGR.\n y_pred_rgb = [\n np.stack([np.zeros_like(img), np.zeros_like(img), img], axis=-1)\n for img in y_pred_arrays\n ]\n\n # ------------\n\n for i in range(len(full_img_rgb)):\n\n # First overlay true and predicted masks.\n overlay_masks = cv2.addWeighted(\n src1=y_true_rgb[i], alpha=0.5, src2=y_pred_rgb[i], beta=1, gamma=0\n )\n\n # Then overlay full_img and masks.\n overlay_all = cv2.addWeighted(\n src1=full_img_rgb[i], alpha=1, src2=overlay_masks, beta=0.5, gamma=0\n )\n\n # Save.\n\n # Get patient ID from y_true masks.\n filename = os.path.basename(y_true_paths_list[i])\n filename_split = filename.split(\"_\")\n patientID = \"_\".join([filename_split[i] for i in range(4)])\n\n masks_filename = patientID + \"___MasksOverlay.png\"\n all_filename = patientID + \"___AllOverlay.png\"\n\n save_path_masks = os.path.join(save_maskoverlay_dir, masks_filename)\n save_path_all = os.path.join(save_fulloverlay_dir, all_filename)\n\n print(save_path_masks)\n print(save_path_all)\n\n cv2.imwrite(filename=save_path_masks, img=overlay_masks)\n cv2.imwrite(filename=save_path_all, img=overlay_all)", "def run_kohonen(data, size_k: int=6, sigma: float=2.0, eta: int=0.9, \n tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #convergence criteria\n eps = 1E-6\n eps_2 = 0.1\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n \n print('start iteration')\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n som_step(centers, data[int(i),:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration 
{}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n \n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n \n print('Total iteration : {}'.format(t))\n return centers, error[1:]", "def iterateSolution(self):\n raise NotImplementedError", "def kmeans_004():\n crops = [200] # Should probably also add 250\n scales = [30, 50] # Scaling is probably the most important part here\n\n scores = []\n for s in scales:\n crop = 200\n n_centroids = 1600\n n_patches = 400000\n # rf_size = int(round(s * .2))\n rf_size = 10\n logger.info(\"Training with crop {}, scale {}, patch size {}, patches {}, centroids {}\".format(crop, s, rf_size, n_patches, n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n # spherical generator\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_004_scale_{}_rf_{}'.format(s, rf_size),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n logger.info(\"Images ndarray shape: {}\".format(images.shape))\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_004_scale_{}_rf_{}.npy'.format(s, rf_size), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n logger.info(\"Train X ndarray shape: {}\".format(train_x.shape))\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n scores.append((s, wrapper.cv_scores))\n del wrapper\n gc.collect()", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if 
highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec", "def voc_pred_process(pred_data, val_cls, recs):\n num_classes = config.num_classes\n cls_img_ids = {}\n cls_bboxes = {}\n cls_scores = {}\n classes = {}\n cls_npos = {}\n for cls in val_cls:\n if cls == 'background':\n continue\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == cls]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n cls_npos[cls] = npos\n classes[cls] = class_recs\n cls_img_ids[cls] = []\n cls_bboxes[cls] = []\n cls_scores[cls] = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [c] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n cls_img_ids[val_cls[label]].append(img_id)\n cls_bboxes[val_cls[label]].append([loc[1], loc[0], loc[3], loc[2]])\n cls_scores[val_cls[label]].append(score)\n return classes, cls_img_ids, cls_bboxes, cls_scores, cls_npos", "def generate_images_pred(self, inputs, outputs):\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n disp = F.interpolate(\n disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n #print('maximum depth',torch.max(outputs[(\"depth\", 0, 0)]))\n\n for i, frame_id in enumerate(self.opt.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n if self.opt.pose_model_type == \"posecnn\":\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)], self.batch_index)\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T, self.batch_index)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n # warping \n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n 
padding_mode=\"border\")\n\n if not self.opt.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]\n #print('maximum depth',torch.max(outputs[(\"depth\", 0, 0)]))", "def test_k_limits():\n\n test_params = inclined_sersic_regression_test_parameters\n if __name__ == '__main__':\n test_params += inclined_exponential_test_parameters\n test_params += inclined_sersic_test_parameters\n\n for mode in (\"InclinedExponential\", \"InclinedSersic\"):\n\n for (_, _, inc_angle, scale_radius, scale_height, _, _) in test_params:\n\n # Get float values for the details\n inc_angle = float(inc_angle)\n scale_radius = float(scale_radius)\n scale_height = float(scale_height)\n\n gsparams = galsim.GSParams()\n\n # Now make a test image\n test_profile = get_prof(mode, inc_angle * galsim.radians, scale_radius, scale_height)\n\n # Check that the k value at maxk is below maxk_threshold in both the x and y dimensions\n kx = test_profile.maxk\n ky = test_profile.maxk\n\n kx_value = test_profile.kValue(kx=kx, ky=0.)\n np.testing.assert_(np.abs(kx_value) < gsparams.maxk_threshold,\n msg=\"kx_value is not below maxk_threshold: \" + str(kx_value) + \" >= \"\n + str(gsparams.maxk_threshold))\n\n ky_value = test_profile.kValue(kx=0., ky=ky)\n np.testing.assert_(np.abs(ky_value) < gsparams.maxk_threshold,\n msg=\"ky_value is not below maxk_threshold: \" + str(ky_value) + \" >= \"\n + str(gsparams.maxk_threshold))\n\n # Check that less than folding_threshold fraction of light falls outside r = pi/stepk\n rmax = np.pi / test_profile.stepk\n pixel_scale = 0.1\n test_image = galsim.Image(int(2*rmax/pixel_scale), int(2*rmax/pixel_scale),\n scale=pixel_scale)\n test_profile.drawImage(test_image)\n\n # Get an array of indices within the limits\n image_shape = np.shape(test_image.array)\n x, y = np.indices(image_shape, dtype=float)\n\n image_center = test_image.center\n x -= image_center.x\n y -= image_center.y\n\n # Include all pixels that are at least partially within distance r of the centre\n r = pixel_scale * np.sqrt(np.square(x) + np.square(y))\n good = r < rmax + np.sqrt(2.)*pixel_scale\n\n # Get flux within the limits\n contained_flux = np.ravel(test_image.array)[np.ravel(good)].sum()\n\n # Check that we're not missing too much flux\n total_flux = np.sum(test_image.array)\n assert (total_flux-contained_flux)/total_flux <= gsparams.folding_threshold", "def do_recursions(self):\n for _ in range(self.iterations):\n self.features = self.do_a_recursion()", "def evaluate_iterations(predictions, gold, result_collector, condition):\n for iteration_id, texts in predictions.items():\n texts_in_iteration = sorted(texts)\n gold_trees = [gold[tid] for tid in texts_in_iteration]\n pred_trees = [texts[tid] for tid in texts_in_iteration]\n for level, scores in eval_prediction(gold_trees, pred_trees):\n result_collector.add_result(condition, iteration_id, level, scores)", "def algorithm_loop(self):", "def rigid3d_proc(point_clouds, rgb_images, depth_images, np_kps_pre_img, cv_kps_pre_img, cv_des_pre_img,\n save_intermediate=False, out_folder=None, image_set_name=None, poisson=True, plot=True):\n pcds = make_pcds(point_clouds)\n kps_3d = make_3d_kps_depth_img(depth_images, np_kps_pre_img)\n all_results = []\n\n # perform global registration between every 2 consecutive images\n for i in range(1, len(pcds)):\n img1, kp1, des1 = rgb_images[i], cv_kps_pre_img[i], cv_des_pre_img[i]\n img2, kp2, des2 = rgb_images[i - 1], cv_kps_pre_img[i - 1], cv_des_pre_img[i - 1]\n\n bf_matches = 
q8.mathching_skimage(img1, kp1, des1, img2, kp2, des2, plot)\n H_matrix, matchs = q9.ransac_loop(img1, img2, kp1, kp2, bf_matches)\n\n m_kps1_3d = []\n m_kps2_3d = []\n\n for m in matchs:\n m_kps1_3d.append(kps_3d[i][m[0]])\n m_kps2_3d.append(kps_3d[i - 1][m[1]])\n\n R, t = r3d.rigid_transform_3D(np.array(m_kps1_3d).T, np.array(m_kps2_3d).T)\n Hmatrix = np.pad(R, ((0, 1), (0, 1)))\n Hmatrix[3, 3] = 1\n Hmatrix[0, 3] = t[0, 0]\n Hmatrix[1, 3] = t[1, 0]\n Hmatrix[2, 3] = t[2, 0]\n\n print(t)\n if plot:\n o3d_utils.visualize_transformation(pcds[i], pcds[i - 1], Hmatrix)\n\n print(Hmatrix)\n all_results.append(Hmatrix)\n\n # chain all point clouds together with computed transformation\n chain_transformation(pcds, all_results, save_intermediate, out_folder, image_set_name, poisson, plot)", "def run_kohonen_dynamicLearningRate(data,fun,size_k: int=6, eta: float=0.1, tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n sigma = fun(t)\n som_step(centers, data[i,:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n\n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n return centers, error[1:]", "def process_LPJ_GUESS_rasters(workspace_dir):\n data_dict = base_data_dict('10km')\n aligned_raster_dir = os.path.join(workspace_dir, 'resampled_kg_m-2')\n if not os.path.exists(aligned_raster_dir):\n os.makedirs(aligned_raster_dir)\n\n raw_tif_dir = r'F:/LPJ_Guess_carbon_scenarios/exported_geotiff'\n LPJ_GUESS_basename_list = [\n 'LPJ-GUESS_rcp2p6_IMAGE_cVeg_2015_1x1.tif',\n 'LPJ-GUESS_rcp2p6_IMAGE_cVeg_2050_1x1.tif',\n 'LPJ-GUESS_rcp6p0_AIM_cVeg_2015_1x1.tif',\n 'LPJ-GUESS_rcp6p0_AIM_cVeg_2050_1x1.tif',\n 'LPJ-GUESS_rcp8p5_MAGPIE_cVeg_2015_1x1.tif',\n 'LPJ-GUESS_rcp8p5_MAGPIE_cVeg_2050_1x1.tif']\n LPJ_GUESS_path_list = [\n os.path.join(raw_tif_dir, b) for b in LPJ_GUESS_basename_list]\n results_dir = os.path.join(workspace_dir, 'aligned_inputs')\n aligned_inputs_dict = {\n 'kba_raster': os.path.join(\n results_dir, os.path.basename(data_dict['kba_raster'])),\n 'countries_mask': os.path.join(\n results_dir, os.path.basename(data_dict['countries_mask'])),\n 'service_raster_list': [\n os.path.join(results_dir, os.path.basename(r)) for r in\n LPJ_GUESS_path_list],\n }\n existing_processed_inputs = [\n os.path.join(results_dir, os.path.basename(r))\n for r in LPJ_GUESS_path_list]\n if all([os.path.exists(p) for p in existing_processed_inputs]):\n return aligned_inputs_dict\n\n kba_pixel_size = 
pygeoprocessing.get_raster_info(\n data_dict['kba_raster'])['pixel_size']\n service_pixel_size = pygeoprocessing.get_raster_info(\n LPJ_GUESS_path_list[0])['pixel_size']\n input_path_list = (\n [data_dict['kba_raster'], data_dict['countries_mask']] +\n LPJ_GUESS_path_list)\n aligned_path_list = [\n os.path.join(aligned_raster_dir, os.path.basename(r)) for r in\n input_path_list]\n print(\"Aligning LPJ-GUESS carbon rasters: \")\n pygeoprocessing.align_and_resize_raster_stack(\n input_path_list, aligned_path_list,\n ['near'] * len(aligned_path_list), kba_pixel_size,\n bounding_box_mode=\"union\", raster_align_index=0)\n raise ValueError(\"Carbon rasters must be converted to Gt in R\")", "def run_inference_true_path(self, r, xr, yr):\n self.tc.reset()\n\n em_data = {}\n\n print('Running Image Optimization using True Eye Path\\n')\n\n for u in range(self.n_itr):\n t0 = self.n_t * u / self.n_itr\n tf = self.n_t * (u + 1) / self.n_itr\n print('Iteration: {} | Running up to time {}'.format(u, tf))\n\n self.run_m_true_path(t0, tf, r, xr, yr, n_g_itr=self.n_g_itr)\n\n iteration_data = {\n 'time_steps': tf,\n 'image_est': self.tc.image_est(),\n 'coeff_est': self.tc.get_A()}\n\n em_data[u] = iteration_data\n em_data['mode'] = 'path_given'\n\n if self.save_mode:\n self.data['EM_data'] = em_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Successively applies each of the rotations stored in givens to H_col.
def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> jax.ShapedArray: rotation_carry = (H_col, 0, k, givens) def loop_condition(carry): i = carry[1] k = carry[2] return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0) def apply_ith_rotation(carry): H_col, i, k, givens = carry cs = givens[0, i] sn = givens[1, i] H_i = cs * H_col[i] - sn * H_col[i + 1] H_ip1 = sn * H_col[i] + cs * H_col[i + 1] H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i) H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1) return (H_col, i + 1, k, givens) rotation_carry = jax.lax.while_loop(loop_condition, apply_ith_rotation, rotation_carry) H_col = rotation_carry[0] return H_col
[ "def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def transform(self, rot = numpy.array([[1,0,0],[0,1,0],[0,0,1]]), tran = numpy.array([0,0,0])):\n for o in self:\n o.transform(rot, tran)\n if self.hetatm:\n for hetatom in self.hetatm:\n hetatom.transform(rot,tran)", "def ROTION(Uocc,h,Js,Ks,f,a,b,nocc,shell,verbose=False):\n nsh = len(f)\n hmo = ao2mo(h,Uocc)\n Jmo = [ao2mo(J,Uocc) for J in Js]\n Kmo = [ao2mo(K,Uocc) for K in Ks]\n Eone = sum(f[shell[i]]*hmo[i,i] for i in range(nocc))\n Fmo = [f[i]*hmo + sum(a[i,j]*Jmo[j] + b[i,j]*Kmo[j] for j in range(nsh))\n for i in range(nsh)]\n Eel = Eone + sum(Fmo[shell[i]][i,i] for i in range(nocc))\n \n Delta = np.zeros((nocc,nocc),'d')\n for i in range(nocc):\n ish = shell[i]\n for j in range(i):\n jsh = shell[j]\n if ish == jsh: continue\n # ish is now guaranteed to be larger than 0\n Jij = Jmo[ish][j,j]\n Kij = Kmo[ish][j,j]\n Gij = 2*(a[ish,ish]+a[jsh,jsh]-2*a[ish,jsh])*Kij \\\n + (b[ish,ish]+b[jsh,jsh]-2*b[ish,jsh])*(Jij+Kij)\n\n D0 = -(Fmo[jsh][i,j]-Fmo[ish][i,j])/\\\n (Fmo[jsh][i,i]-Fmo[ish][i,i]-Fmo[jsh][j,j]+Fmo[ish][j,j]\\\n +Gij)\n Delta[i,j] = D0\n Delta[j,i] = -D0\n if verbose:\n print(\"ROTION Delta Matrix\")\n print(Delta)\n if nsh > 1:\n eD = expm(Delta)\n Uocc = np.dot(Uocc,eD)\n return Eel,Eone,Uocc", "def test_givens_rotate(shape, indices, row, left):\n matrix = np.random.rand(*shape) * 1j + np.random.rand(*shape)\n unitary, (i, j) = matrix.copy(), indices\n if row:\n a, b = matrix[indices, j - 1]\n grot_mat = _givens_matrix(a, b, left)\n unitary[indices] = grot_mat @ unitary[indices]\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[i, j - 1], 0.0) and np.isclose(unitary[j, j - 1], res)\n else:\n assert np.isclose(unitary[i, j - 1], res) and np.isclose(unitary[j, j - 1], 0.0)\n else:\n a, b = matrix[j - 1, indices].T\n grot_mat = _givens_matrix(a, b, left)\n unitary[:, indices] = unitary[:, indices] @ grot_mat.T\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[j - 1, i], 0.0) and np.isclose(unitary[j - 1, j], res)\n else:\n assert np.isclose(unitary[j - 1, indices[0]], res) and np.isclose(\n unitary[j - 1, indices[1]], 0.0\n )", "def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff", "def gyration(self):\n A = self.parent()\n hf = list(self.height_function())\n k = len(hf) - 1\n for i in range(1,k):\n for j in range(1,k):\n if (i+j) % 2 == 0 \\\n and hf[i-1][j] == hf[i+1][j] == hf[i][j+1] == hf[i][j-1]:\n if hf[i][j] < hf[i+1][j]:\n hf[i][j] += 2\n else:\n hf[i][j] -= 2\n for i in range(1,k):\n for j in range(1,k):\n if (i+j) % 2 == 1 \\\n and hf[i-1][j] == hf[i+1][j] == hf[i][j+1] == 
hf[i][j-1]:\n if hf[i][j] < hf[i+1][j]:\n hf[i][j] += 2\n else:\n hf[i][j] -= 2\n return A.from_height_function(matrix(hf))", "def _rotate(self, qfunc):\n self.faces = [np.apply_along_axis(qfunc, -1, face) for face in self.faces]", "def translate(g):\n\n gammaJJKK = 0\n for i in range(3):\n for j in range(3):\n gammaJJKK += g[i][j][0] \n gammaJKJK = 0\n for i in range(3):\n for j in range(3):\n gammaJKJK += g[i][j][1]\n gammaJKKJ=0\n for i in range(3):\n for j in range(3):\n gammaJKKJ += g[i][j][2] \n gamma_lab = np.empty([3,3,3,3])\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n term1 = gammaJJKK * (4 * delta(i, j) * delta(k, l) - delta(i, k) * delta(j, l) - delta(i, l) * delta(j, k))\n term2 = gammaJKJK * (4 * delta(i, k) * delta(j, l) - delta(i, j) * delta(k, l) - delta(i, l) * delta(j, k))\n term3 = gammaJKKJ * (4 * delta(i, l) * delta(j, k) - delta(i, k) * delta(j, l) - delta(i, j) * delta(k, l))\n gamma_lab[i][j][k][l] = (term1 + term2 + term3)/30\n return gamma_lab", "def _h(self, g: common_gates.HPowGate, axis: int):", "def harmonic_vibrational_analysis(self):\n # Get the matrix of atomic masses\n mass_matrix_sqrt_div = np.diag(np.repeat(1.0 / np.sqrt(self.amass), 3))\n\n # calculate the center of mass in cartesian coordinates\n xyzcom = self.eq_geom_cart - self.com.T\n\n # Initialize (3N, 6) array for Translation and Rotation\n Dmat = np.zeros((3 * self.natom, 6), dtype=float)\n\n #####################################################\n # Construct Eigenvectors correspoding to Translation#\n #####################################################\n\n for i in range(3):\n for k in range(self.natom):\n for alpha in range(3):\n if alpha == i:\n Dmat[3 * k + alpha, i] = np.sqrt(self.amass[k])\n else:\n pass\n\n ###################################################\n # Construct Eigenvectors correspoding to Rotation #\n ###################################################\n\n # 1. Get Inertia Tensor and Diagonalize\n Ival, Ivec = np.linalg.eigh(self.moi)\n\n # 2. Construct Pmat\n Pmat = np.dot(xyzcom, Ivec)\n\n # 3. 
Construct Rotational Normal Coordinates\n for i in range(self.natom):\n for j in range(3):\n Dmat[3 * i + j, 3] = (Pmat[i, 1] * Ivec[j, 2] - Pmat[i, 2] * Ivec[j, 1]) * np.sqrt(self.amass[i])\n Dmat[3 * i + j, 4] = (Pmat[i, 2] * Ivec[j, 0] - Pmat[i, 0] * Ivec[j, 2]) * np.sqrt(self.amass[i])\n Dmat[3 * i + j, 5] = (Pmat[i, 0] * Ivec[j, 1] - Pmat[i, 1] * Ivec[j, 0]) * np.sqrt(self.amass[i])\n\n ##################################################################################\n # Set the orthonormalized Translation-Rotation Eigenvectors to attribute Ltransrot\n ##################################################################################\n\n Translation = Dmat[:, 0:3]\n Rotation = Dmat[:, 3:6]\n\n # Separately orthonormalize translation and rotation\n Dtrans, xxx = np.linalg.qr(Translation)\n Drot, xxx = np.linalg.qr(Rotation)\n\n LTR = np.zeros((3 * self.natom, 6), dtype=float)\n LTR[:, 0:3] = Dtrans\n LTR[:, 3:6] = Drot\n\n self.Ltransrot = Drot\n\n # Mass-weight the force constant matrix\n mw_fcm = np.dot(mass_matrix_sqrt_div, np.dot(self.force_constant_matrix, mass_matrix_sqrt_div))\n\n # Project out Rotation and Translation from Hessian\n Imat = np.eye(LTR.shape[0])\n llt = np.dot(LTR, LTR.T)\n proj_trans_rot_hessian = np.dot(Imat - llt, np.dot(mw_fcm, Imat - llt))\n rphval, rphvec = np.linalg.eigh(proj_trans_rot_hessian)\n\n # SORT OUT ALL -VE FREQUENCIES\n all_index_0 = np.where(abs(rphval) < 1e-4)[0]\n eigvals_0 = rphval[all_index_0]\n eigvec_0 = rphvec[:, all_index_0]\n\n # A cleaner solution?\n rphval = np.delete(rphval, all_index_0, axis=0)\n rphvec = np.delete(rphvec, all_index_0, axis=1)\n rphval = np.concatenate([eigvals_0, rphval])\n rphvec = np.concatenate([eigvec_0, rphvec], axis=1)\n\n vib_freq_cm = np.sqrt(abs(rphval[6:])) * hfreq_cm\n Lmwc = rphvec[:, 6:3 * self.natom]\n\n # NORMAL MODES - SET ATTRIBUTE\n self.Lmwc = Lmwc\n\n # HESSIAN - SET ATTRIBUTE [ATOMIC UNITS AT THIS POINT]\n self.hessian = np.diagflat(rphval[3:])\n\n # FREQUENCIES - SET ATTRIBUTE\n self.frequencies = vib_freq_cm\n\n return vib_freq_cm, Lmwc", "def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s", "def _prep_gfunc(self):\n \n # prepare shifted momenta and angles for the symmetric permutation \n self.pip=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double) \n self.pi=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double) \n \n thetap=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n phip=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n theta=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n phi=np.empty((self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n thetapp=np.empty((self.nx),dtype=np.double)\n \n for ix in range(self.nx):\n xval=self.xp[ix]\n thetapp[ix]=np.arccos(xval) \n for jq in range(self.nqpoints+1):\n qpval=self.qgrid[jq]\n for iq in range(self.nqpoints+1):\n qval=self.qgrid[iq]\n \n px=qpval*np.sqrt(1.0-xval**2)\n py=0.0\n pz=0.5*qval+qpval*xval \n self.pi[iq,jq,ix],theta[iq,jq,ix],phi[iq,jq,ix]=self._angle(px,py,pz)\n \n px=-0.5*qpval*np.sqrt(1.0-xval**2)\n py=0.0\n pz=-qval-0.5*qpval*xval \n 
self.pip[iq,jq,ix],thetap[iq,jq,ix],phip[iq,jq,ix]=self._angle(px,py,pz)\n\n # prepare spherical harmonics and store based on lmindx \n # number of lam,mu und l,mu combinations \n nlamindx=self._lmindx(self.lammax,self.lammax)+1\n nlindx=self._lmindx(self.lmax,self.lmax)+1\n \n # array for Y_{lam mu}(hat qp) \n ylam=np.empty((nlamindx,self.nx),dtype=np.cdouble)\n for lam in range(self.lammax+1):\n for mu in range(-lam,lam+1):\n ylam[self._lmindx(lam,mu),:]=sph_harm(mu,lam, 0, thetapp)\n \n \n # array for Y_{l mu}(-q-0.5qp) \n yl=np.empty((nlindx,self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for l in range(self.lmax+1):\n for mu in range(-l,l+1):\n yl[self._lmindx(l,mu),:,:,:]=sph_harm(mu,l, phip, thetap)\n \n # array for Y*_{l mu}(0.5q+qp) (real is sufficient since phi=0)\n ystarl=np.empty((nlindx,self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for l in range(self.lmax+1):\n for mu in range(-l,l+1):\n ystarl[self._lmindx(l,mu),:,:,:]=sph_harm(mu,l, phi, theta)\n \n # now prepare the necessary Clebsch-Gordan coefficients\n # we need (l lam L, M 0 M) and (l lam L,mu M-mu,M)\n # I assume that L is smaller than the lmax or lammax therefore M=-L,L\n # the smallest index for storage \n \n cg=np.zeros((self.nalpha),dtype=np.double)\n cgp=np.zeros((self.nalpha,2*self.lmax+1),dtype=np.double)\n \n for qnset in self.qnalpha: # go through allowed l,lam combinations\n cg[qnset[\"alpha\"]]=float(CG(qnset[\"l\"],0,qnset[\"lam\"],0,self.bl,0).doit())\n for mu in range(-qnset[\"l\"],qnset[\"l\"]+1):\n cgp[qnset[\"alpha\"],mu+qnset[\"l\"]]=float(CG(qnset[\"l\"],mu,qnset[\"lam\"],-mu,self.bl,0).doit())\n\n # now we can perform the mu summation for the combination of coupled spherical harmonics \n ylylam=np.zeros((self.nalpha, self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnset[\"alpha\"]\n l=qnset[\"l\"]\n lam=qnset[\"lam\"]\n for mu in range(-l,l+1):\n lmindx=self._lmindx(l,mu)\n if abs(-mu)<=lam:\n lamindx=self._lmindx(lam,-mu)\n ylylam[alphap,:,:,:]+=cgp[alphap,mu+l]*yl[lmindx,:,:,:]*ylam[lamindx,:]\n \n # bm summation then gives G (stored as gfunc in object) but M=0?\n self.gfunc=np.zeros((self.nalpha,self.nalpha,self.nqpoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam combinations\n alpha=qnset[\"alpha\"]\n l=qnset[\"l\"]\n lam=qnset[\"lam\"]\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"]\n lmindx=self._lmindx(l,0) \n self.gfunc[alpha,alphap,:,:,:]+=8*m.pi**2*np.sqrt((2*lam+1)/(4*m.pi))/(2*self.bl+1) \\\n *ystarl[lmindx,:,:,:]*ylylam[alphap,:,:,:] \\\n *cg[alpha]\n \n # set spline elements based on grid points and shifted momenta \n self.splpi=Cubherm.spl(self.pgrid[0:self.npoints],self.pi)\n self.splpip=Cubherm.spl(self.pgrid[0:self.npoints],self.pip)", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = 
np.dot(h0_2, h2_3)\n return h0_3", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def rot_omega_sm(centre,theta,Ups,R_t):\n R_t_rot = copy(R_t)\n for i in range(2):\n gamma = complex(R_t[i][0,1])\n gamma_rot = rot_cpx(centre,theta,gamma) \n R_t_rot[i][0,1] = gamma_rot\n R_t_rot[i][1,0] = gamma_rot.conj()\n Omega= Ups[:3,3:]\n Omega_rot = rot_cpx(centre,theta,Omega)\n Ups_rot = Ups\n Ups_rot[:3,3:]=Omega_rot\n Ups_rot[3:,:3]=Omega_rot.T.conj()\n \n return Ups_rot,R_t_rot", "def diagonalizeHamiltonian(self,H):\n eig_vals,eig_vecs = (lg.eigh(H)) # new eigenvalues\n\n newbasisorder=[]\n \n for i in range(len(eig_vecs)):\n basisIndex = np.abs(eig_vecs[:,i]).argmax() # IMPORTANT: note how the eigenvector is picked out - eigenvectors are zeros with a single one at some index\n newbasisorder.append(basisIndex)\n \n eig_vecs = [x for _,x in sorted(zip(newbasisorder,eig_vecs))]\n \n return eig_vals,eig_vecs", "def mod(self):\n\n step = 100.0\n grid = [i / step for i in range(int(step))]\n h = grid[1]\n\n integral = h / 90. * 7 * at_dl(0)\n first = 0\n\n for a in grid[:-1]:\n\n integral += first\n\n integral += h / 90. * 32 * at_dl(a + h * 0.25) #x2\n integral += h / 90. * 12 * at_dl(a + h * 0.50) #x3\n integral += h / 90. * 32 * at_dl(a + h * 0.75) #x4\n\n first = h / 90. * 7 * at_dl(a + h) #x5\n\n integral += first\n\n return integral", "def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors.
def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]: # This call successively applies each of the # Givens rotations stored in givens[:, :k] to H_col. H_col = apply_rotations(H_col, givens, k) cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1]) givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k) givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k) r_k = cs_k * H_col[k] - sn_k * H_col[k + 1] R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k) R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.) return R_col, givens
[ "def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col", "def gqr(A):\n\tdef rotate(i,k,B):\n\t# create the Givens rotation matrix G to zero out the i,k entry of B\n\t\tc,s,r = solve(B[k,k],B[i,k])\n\t\tr = sp.sqrt(B[k,k]**2 + B[i,k]**2)\n\t\tc = B[k,k]/r\n\t\ts = -B[i,k]/r\n\t\tG = sp.eye(m)\n\t\tG[i,i] = c\n\t\tG[k,k] = c\n\t\tG[k,i] = -s\n\t\tG[i,k] = s\n\t\treturn G\n\t\n\tB = A.copy()\t\n\tm,n = B.shape\n\tG = sp.eye(m)\n\t#cycle through each nonzero subdiagonal element of B, and rotate it to zero\n\tfor k in sp.arange(n-1):\n\t\tfor i in sp.arange(k+1,m):\n\t\t\tif B[i,k] is not 0:\n\t\t\t\tH = rotate(i,k,B)\n\t\t\t\tB = sp.dot(H,B)\n\t\t\t\tG = sp.dot(H,G)\n\treturn G.T, B", "def rotate_scheme_mat(sch_mat, cyldir1, cyldir2):\n if cyldir1.size != 3 or cyldir2.size != 3:\n raise ValueError(\"cyldir1 and cyldir2 should be 3-elements NumPy\"\n \" arrays.\")\n if (~np.isclose(np.sum(cyldir1**2), 1) or\n ~np.isclose(np.sum(cyldir2**2), 1)):\n raise ValueError(\"cyldir1 and cyldir2 should have unit norm.\")\n # Rotation axis to go from default z-axis to new cyldir\n rot_ax = np.cross(cyldir1, cyldir2)\n\n # If cyldir already parallel to z (cross product is zero), simply return\n # original sch_mat\n sch_mat_eff = sch_mat\n\n rot_ax_sqrd = np.sum(rot_ax**2)\n if rot_ax_sqrd > 0:\n # z-axis and cyldir not parallel (cross product is a non-zero vector)\n rot_ax = rot_ax/np.sqrt(rot_ax_sqrd)\n rot_ang = np.arccos(np.dot(cyldir1, cyldir2))\n rot_mat = vrrotvec2mat(rot_ax, -rot_ang)\n rot_gdir = sch_mat[:, :3] @ rot_mat.T\n # ! 
one rotated direction could be [eps, eps, sqrt(1-2eps**2)~=1]\n rot_gdir[np.abs(rot_gdir) <= np.finfo(float).eps] = 0\n rot_gdir_norm = np.sqrt(np.sum(rot_gdir**2,\n axis=1, keepdims=True)) # (Nseq, 1)\n nnz_g = np.squeeze(rot_gdir_norm > 0)\n rot_gdir[nnz_g, :] = (rot_gdir[nnz_g, :]/rot_gdir_norm[nnz_g, :])\n sch_mat_eff = rot_gdir # (Nseq, 3)\n if sch_mat.shape[1] > 3:\n sch_mat_eff = np.hstack((sch_mat_eff, sch_mat[:, 3:]))\n return sch_mat_eff", "def v_circular(snap, subh, layers, rmax):\n start=time.clock()\n\n CM = subh.pot_min\n # Now using CLUES, so unit is Mpc/h\n pos = (np.concatenate((snap['POS '][0], snap['POS '][4], snap['POS '][1])) - CM) * 1000 / hubble0\n\n rot_matrix = PCA_matrix(snap, subh)\n pos = np.dot(pos, rot_matrix)\n\n r = np.linalg.norm(pos, axis=1)\n # r = np.sqrt(pos[:, 0]**2, pos[:, 1]**2)\n\n ind = r < 200\n r = r[ind]\n\n masses = np.concatenate((snap['MASS'][0], snap['MASS'][4], snap['MASS'][1])) * 1e10 / hubble0\n masses = masses[ind]\n\n # We make a mass histogram with radial bins and calculate the inner mass at each radius:\n mass, radii = np.histogram(r, bins=layers, range=(0, rmax), weights=masses)\n inner_mass = np.cumsum(mass)\n\n # We calculate r_200:\n rho = inner_mass / (4/3 * np.pi * radii[1:]**3)\n rho_crit = 126.7 # solar masses per kpc^3, from Planck\n ind_200 = (np.abs(rho - 200*rho_crit)).argmin() # This gives the index of the bin where rho is closest to 200*rho_crit\n r_200 = radii[ind_200]\n print('r_200 = %s kpc' %r_200)\n\n # Finally, we calculate v_circ with the newtonian expression:\n G = 43007.1 # gravitational constant in code units\n v_circ = np.sqrt(G * 1e-10 * inner_mass / radii[1:]) # I use 1e-10 to turn mass back to code units, h in mass and radii cancel out.\n \t\t\t\t\t\t\t\t\t\t\t\t\t# Velocity comes out in km/s\n\n print('v_circular takes %s seconds' %(time.clock() - start))\n return v_circ[0:ind_200], radii[1:ind_200+1], r_200", "def klhessh(V,W,H):\n \n K = V.shape[1]\n R = W.shape[1]\n data = np.zeros(K * R ** 2)\n \n row_ind1 = np.tile(np.repeat(np.arange(R), R), K)\n row_ind2 = np.repeat(np.arange(0, K * R, R), R ** 2)\n row_ind = row_ind1 + row_ind2\n \n col_ind1 = np.tile(np.arange(R), (1, K * R))[0]\n col_ind2 = np.repeat(np.arange(0, K * R, R), R ** 2)\n col_ind = col_ind1 + col_ind2\n \n temp = V / (W.dot(H)) ** 2\n \n for k in range(K):\n \n hkx = W.transpose().dot(np.diag(temp[:,k]).dot(W))\n hkx_row = np.reshape(hkx, (1, R ** 2))[0]\n data[k * R ** 2: (k + 1) * R ** 2] = hkx_row.copy()\n \n return scipy.sparse.csr_matrix((data, (row_ind, col_ind)), shape = [R * K, R * K])", "def rotation_cs(X, Y, C, S) :\n Xrot = X*C - Y*S \n Yrot = Y*C + X*S \n return Xrot, Yrot", "def gram_schmidt(S, start_col=0):\n Q = S.copy()\n k = S.shape[1]\n assert k > 1 and start_col >= 0\n start_col = min(S.shape[1], start_col)\n if Q.dtype != np.float32 and Q.dtype != np.float64:\n Q = Q.astype(np.float64)\n\n if start_col == 0:\n Q[:, 0] = normalize_vector(Q[:, 0])\n\n uu = []\n for i in range(start_col + 1, k):\n Q[:, i] = S[:, i]\n for j in range(0, i):\n u = Q[:, j]\n v = Q[:, i]\n if len(uu) <= j:\n uu.append(u.T.dot(u))\n Q[:, i] -= u * (u.T.dot(v) / uu[j])\n\n Q[:, i] = normalize_vector(Q[:, i])\n # Re-project Q[:, i] to the orthogonal complement of Q[:, :i] to make sure they stay orthogonal.\n Q[:, i] = Q[:, i] - Q[:, :i].dot(Q[:, :i].T.dot(Q[:, i]))\n\n return Q", "def coupling_pressures(zhr, rho, c, k, nr, G):\n diag = zhr - np.real(rho * c * (1 - np.exp(-1j * k * nr)))\n np.fill_diagonal(G, diag)\n return G", "def 
homology(K):\n d=boundary_operators(K)\n D=[None for x in range(dim(K)+1)] \n L=[None for x in range(dim(K)+2)] \n R=[None for x in range(dim(K)+1)] \n H=[None for x in range(dim(K)+1)] \n r=[0 for x in range(dim(K)+2)] \n # first: k=0:\n c=dims_c(K)\n # then: compute all D,L,R: \n # R[1] ... R[dim(K)] , \n # D[1] ... D[dim(K)]\n # L[1] ... L[dim(K)]\n R[0]=eye(c[0])\n L[dim(K)+1]=eye(c[dim(K)])\n\n for k in range(1,dim(K)+1):\n ## L d R = D \n L[k], D[k], R[k] = LDR( R[k-1].inv() * d[k] )\n r[k]=rank_of_diagonal(D[k]) ## TODO: just take it from the pivot list\n shift_matrix=shift_Z(c[k],r[k]) \n D[k] = D[k] * shift_matrix \n R[k] = R[k] * shift_matrix\n for k in range(dim(K)+1): \n gens_of_H = [(R[k]*L[k+1].inv())[:,x] for x in range(r[k+1],c[k]-r[k])]\n hk=len(gens_of_H)\n homology_class_matrix= Matrix.hstack( zeros(hk,r[k+1]), \n eye(hk), zeros(hk,r[k]) ) * L[k+1] * R[k].inv() \n H[k]={'gens': gens_of_H,'hcm':homology_class_matrix} #matrix of the projection C_k -> H_k\n return (L,D,R,r,H)", "def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s", "def hcrs_to_hgs(hcrscoord, hgsframe):\n if hgsframe.obstime is None:\n raise ValueError(\"To perform this transformation the coordinate\"\n \" Frame needs an obstime Attribute\")\n\n # Determine the Sun-Earth vector in ICRS\n # Since HCRS is ICRS with an origin shift, this is also the Sun-Earth vector in HCRS\n sun_pos_icrs = get_body_barycentric('sun', hgsframe.obstime)\n earth_pos_icrs = get_body_barycentric('earth', hgsframe.obstime)\n sun_earth = earth_pos_icrs - sun_pos_icrs\n\n # De-tilt the Sun-Earth vector to the frame with the Sun's rotation axis parallel to the Z axis\n sun_earth_detilt = sun_earth.transform(_SUN_DETILT_MATRIX)\n\n # Remove the component of the Sun-Earth vector that is parallel to the Sun's north pole\n hgs_x_axis_detilt = CartesianRepresentation(sun_earth_detilt.xyz * [1, 1, 0])\n\n # The above vector, which is in the Sun's equatorial plane, is also the X axis of HGS\n x_axis = CartesianRepresentation(1, 0, 0)\n rot_matrix = _make_rotation_matrix_from_reprs(hgs_x_axis_detilt, x_axis)\n\n return matrix_product(rot_matrix, _SUN_DETILT_MATRIX)", "def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff", "def precompute_spin_diagonal_block(self,e):\n# {{{\n ket = ci_string(self.no,e)\n bra = ci_string(self.no,e)\n H = np.zeros((ket.max(),ket.max()))\n \n # avoid python function call overhead\n ket_max = ket.max()\n bra_max = bra.max()\n range_ket_n_orbs = range(ket.n_orbs())\n range_ket_max = range(ket.max())\n \n for K in range_ket_max:\n bra.dcopy(ket)\n\n # hpq p'q \n for p in range_ket_n_orbs:\n for q in range_ket_n_orbs:\n bra.dcopy(ket)\n bra.a(q)\n if bra.sign() == 0:\n continue\n bra.c(p)\n if bra.sign() == 0:\n continue\n \n L = bra.linear_index()\n\n #print(str(ket),q,' -> ',p,str(bra))\n \n term = self.H.t[q,p]\n sign = bra.sign() \n \n H[K,L] += term * sign\n\n # <pq|rs> p'q'sr -> (pr|qs) \n for r in range(0,ket.n_orbs()):\n for s in range(r+1,ket.n_orbs()):\n for p in range(0,ket.n_orbs()):\n for q in range(p+1,ket.n_orbs()):\n \n bra.dcopy(ket)\n \n 
bra.a(r) \n if bra.sign() == 0:\n continue\n bra.a(s) \n if bra.sign() == 0:\n continue\n bra.c(q) \n if bra.sign() == 0:\n continue\n bra.c(p) \n if bra.sign() == 0:\n continue\n L = bra.linear_index()\n Ipqrs = self.H.V[p,r,q,s]-self.H.V[p,s,q,r]\n sign = bra.sign()\n H[K,L] += Ipqrs*sign\n ket.incr()\n\n return H", "def test_squeezing(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.squeezing(r, phi)\n out = S @ S.T\n\n # apply to an identity covariance matrix\n rotation = np.array(\n [[np.cos(phi / 2), -np.sin(phi / 2)], [np.sin(phi / 2), np.cos(phi / 2)]]\n )\n expected = rotation @ np.diag(np.exp([-2 * r, 2 * r])) @ rotation.T\n assert np.allclose(out, expected, atol=tol, rtol=0)", "def kernel(h):\n ker = np.zeros((4*h-1, 4*h-1, 4*h-1))\n\n for i in range(4*h-1):\n for j in range(4*h-1):\n for k in range(4*h-1):\n r_grid = np.linalg.norm([i - 2*h + 1, j - 2*h + 1, k - 2*h + 1])\n ker[i, j, k] = W(r_grid / h, h)\n return ker", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def diagonalizeHamiltonian(self,H):\n eig_vals,eig_vecs = (lg.eigh(H)) # new eigenvalues\n\n newbasisorder=[]\n \n for i in range(len(eig_vecs)):\n basisIndex = np.abs(eig_vecs[:,i]).argmax() # IMPORTANT: note how the eigenvector is picked out - eigenvectors are zeros with a single one at some index\n newbasisorder.append(basisIndex)\n \n eig_vecs = [x for _,x in sorted(zip(newbasisorder,eig_vecs))]\n \n return eig_vals,eig_vecs", "def _rmat_s_helper(chi=None, omes=None, out=None):\n if chi is not None:\n cx = np.cos(chi)\n sx = np.sin(chi)\n else:\n cx = 1.0\n sx = 0.0\n\n if omes is not None:\n # omes is an array (vector): output is as many rotation matrices as omes entries.\n n = len(omes)\n out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)\n\n if chi is not None:\n # ome is array and chi is a value... compute output\n cx = np.cos(chi)\n sx = np.sin(chi)\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw\n out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw\n else:\n # omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.\n # cx IS 1.0, sx IS 0.0\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.\n out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw\n else:\n # omes is None, results should be equivalent to an array with a single element 0.0\n out = out if out is not None else np.empty((1, 3, 3))\n if chi is not None:\n # ome is 0.0. cw is 1.0 and sw is 0.0\n cx = np.cos(chi)\n sx = np.sin(chi)\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx\n out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx\n else:\n # both omes and chi are None... 
return a single identity matrix.\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.\n out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.\n\n\n return out", "def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if quote already exists in Nostalgiabot's memory for this Person.
def has_said(self, quote: str) -> bool: return any(q for q in self.quotes if q.content.lower() == quote.lower())
[ "def check_penseive_quote_exists(self, pitem):\n calais_quotes = pitem.quoteitem_set.all().filter(entity__type__source='C')\n if calais_quotes.count() > 0:\n return True\n return False", "def _is_term_exist(self, term):\r\n return term in self.postingDict", "def _is_term_exist(self, term):\n return term in self.postingDict", "def is_term_exist(self, term):\r\n return term in self.postingDict", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def check_if_person_id_has_used_coupon(self, coupon_object, person_object):", "def isUnique(self, word):\n abbr = self.getAbbr(word)\n return abbr not in self.d or len(self.d[abbr]) == 1 and self.d[abbr][0] == word", "def exist_name(name):\n return name in phonebook", "def exists (self):\n q = COT_Record.gql (\"WHERE symbol = :1 and date = :2\", self.symbol, self.date)\n if len (q.fetch (10)) > 0:\n return True\n else:\n return False", "def isUnique(self, word):\n abbr = self.gen_abbr(word)\n\n if abbr not in self.dict:\n return True\n elif len(self.dict[abbr]) == 1 and word in self.dict[abbr]:\n return True\n else:\n return False", "def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False", "def donor_name_exists(self, name):\n if isinstance(name, str):\n for donor in self.donors:\n if donor.name == name:\n return True\n return False\n else:\n raise TypeError(\"Name is not a text.\")", "def check_book_already_issued(stud, book_obj):\r\n for _ in stud.book_issued:\r\n if _.isbn == book_obj.isbn:\r\n print(\"You already have same copy ! , Can't issue another copy\")\r\n return False\r\n return True", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def HasBookName(self) -> bool:", "def IsDuplicate(self,QueryQuest):\n\t\tfor EachQuest in self.Quests:\n\t\t\tif str(EachQuest) == str(QueryQuest):\n\t\t\t\treturn(True)\n\t\treturn(False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the stay time of the visit.
def update_stay_time(self): # It would not be better to simply self.stay_time = self.get_length() ?? self.stay_time = self.get_length()
[ "def _update_time(self):\n self.prev_time = time.time()", "def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_RESOLUTION):\n user_pb.last_visit_timestamp = now\n self.services.user.UpdateUser(mr.cnxn, user_pb.user_id, user_pb)", "def update(self):\n self.last_time = self.current_time\n self.current_time = time.perf_counter()\n self.delta_time = self.current_time - self.last_time", "def update_now_and_tomorrow(self):\n self.yesterday = self.get_today() - datetime.timedelta(1)\n self.today = self.get_today()\n self.tomorrow = self.get_today() + datetime.timedelta(1)", "def update_activity(self):\n self.last_modified_date = date.now()", "def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)", "def update(self, now=True):\n if now:\n self.UT = datetime.datetime.utcnow()\n self.CalcJulDay()\n self.CalcLST()", "def setSubmitTime(t):", "def update_trip_time(trip_path, paths, stay_time, mpoi_gains, start_end, model_params, method_use, stay_offset):\n\n trip_time = 0.0\n tot_gain = 0.\n time_list = []\n stay_list = []\n gain_list = []\n\n for idx, node in enumerate(trip_path):\n next_node = trip_path[(idx+1)%trip_path.size]\n rtime = paths[node, next_node]\n trip_time += rtime\n time_list.append(rtime)\n\n # if this is start node or end node check if it is in the tour\n if next_node in start_end and not start_end[next_node]:\n # don't add stay time\n gain_list.append(0)\n stay_list.append(0)\n else:\n # compute stay time\n if method_use == method.proposed or method_use == method.personal or method_use == method.profit:\n stime, gain = find_stay_time(model_params[next_node], rtime, stay_time[next_node], mpoi_gains[next_node], stay_offset)\n else:\n stime = stay_time[next_node]\n gain = mpoi_gains[next_node]\n trip_time += stime\n tot_gain += gain\n\n stay_list.append(stime)\n gain_list.append(gain)\n \n return trip_time, tot_gain, time_list, stay_list, gain_list", "def activity_walk_bike_time_update(request, pk, walk_time, bike_time):\n activity = Activity.objects.get(pk=pk)\n if walk_time > 0:\n activity.walk_time = walk_time\n if bike_time > 0:\n activity.bike_time = bike_time\n activity.save()\n return Response({'walk_time': activity.walk_time, 'bike_time': activity.bike_time},\n status=status.HTTP_200_OK)", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update(self, delta_time):\n pass", "def auto_invalidate(self):\n current = datetime.now()\n if current > self._invalidated + timedelta(seconds=self._timetolive):\n self.invalidate()", "def update_activity(self):\n self.last_modified_date = date.now()\n #self.project.update_activity()", "def update_last_ate():\r\n global sent_notification,variables\r\n if sent_notification==1:\r\n send_email(\"Feeding the cat\", \"you have fed the cat and he is back to normal\")\r\n variables[\"time\"]=time.time()\r\n sent_notification = 0", "def update_times(self):\n if self.train_equation.velocity != 0:\n self.traveling_time += self.time_dynamics.step_duration\n else:\n self.stopped_time += self.time_dynamics.step_duration", "def change_time(self, new_time):\r\n self.when = new_time", "def _update_active_rides_fast(self, time: datetime) -> None:\n pass", "def updatePullDate(self):\n self.startTime = datetime.now()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeats a message multiple times.
async def repeat(self,ctx, times: int, content='repeating...'): for i in range(times): await ctx.send(content)
[ "async def repeat(ctx, times: int, *, message):\n for i in range(times):\n await ctx.send(message)", "async def repeat(times : int, content='repeating...'):\r\n for i in range(times):\r\n await bot.say(content)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = command\n for i in range(times):\n await bot.process_commands(msg)", "async def do_repeat(self, ctx, *, input: str):\n await ctx.reply(input, mention_author = False)", "def cycle(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n players = self.player_queue.pop_all()\n players_str = ' '.join(players)\n channel = SOCKET_ARGS['channel']\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n self.player_queue_credentials = credential_str\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self.player_queue_credentials = None\n for player in players:\n self._add_to_whisper_queue(player, whisper_str)\n # self.command_queue.appendleft(('_delete_last_row', {}))\n self._add_to_chat_queue(\"Invites sent to: {} and there are {} people left in the queue\".format(\n players_str, len(self.player_queue.queue)))", "async def send_two_messages():\n for _ in range(2):\n await send_single_message()", "def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)", "def repeat(word, repetitions):\n return word * repetitions", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeat(self, value):\n return self._query(\"tl.repeat\", str=str(value))", "def repeat(self):\n self.retry_attempt = self.retry_count\n while self.retry_attempt:\n if self.retry_attempt != self.retry_count:\n sleep(self.retry_pause)\n self.retry_attempt -= 1\n yield self", "def simple_make_qrepeat(iter_cnt, body):\n\n return 'qrepeat(%d, %s)' % (iter_cnt, str(body))", "def repeat_string_n_times(string, count):\n return string * int(count)", "def repeat_string_n_times(string, count):\r\n return string * int(count)", "def test_simple_repeat(self):\n r = mi.repeatfunc(lambda: 5)\n self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)])", "def repeat_timers(bot, chat_id, message_id):\n\n bot_collection[chat_id].timers.repeat()\n start_timer(bot, chat_id, message_id)", "def repeat(self):\r\n command = self.last_command[0]\r\n if command == 'note':\r\n parameter = self.last_command[1]\r\n print(type(parameter))\r\n self.music.add_note(parameter)\r\n else:\r\n self.music.add_note(0)", "def next_message(self):\r\n self.message_num += 1\r\n return \"%d\" % self.message_num" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create data that use Choice model
def create_choices(question_model, text="text", total_votes = 0): return Choice.objects.create(question=question_model, text=text, total_votes=total_votes)
[ "def test_choices_can_be_added():\n decision = Decision()\n decision.add_choice(\"Truck\")\n assert decision.choices == {\"Truck\": []}\n decision.add_choice(\"Van\")\n assert decision.choices == {\"Truck\": [], \"Van\": []}", "def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)", "def choice(self, obj):\r\n return (\r\n self.field.prepare_value(obj),\r\n SmartLabel(\r\n obj, self.field.label_from_instance, self.field.choice_attrs\r\n )\r\n )", "def _create_response_model(self, data):\n pass", "def choice(choices):\n\treturn models.CharField(max_length=1, choices=choices)", "def __init__(self, choices):\n\n self._choices = choices\n self._index = 0", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def choice() :", "def addLabelChoiceEntry(self, sizer, label, choices, initSel, fieldName, data=[]):\r\n lblCtrl = wx.StaticText(self, -1, label)\r\n choice = wx.Choice(self, -1, choices=choices)\r\n if not isinstance(fieldName, tuple):\r\n selectValue = getattr(self.defaults, fieldName, initSel)\r\n else:\r\n lista = getattr(self.defaults, fieldName[0], [])\r\n selectValue = lista[fieldName[1]] if lista else initSel\r\n selectIndex = choices.index(selectValue) if selectValue is not None else 0\r\n choice.SetSelection(selectIndex)\r\n choice.customData = data\r\n #self.Bind(wx.EVT_CHOICE, self.OnChoiceType, choice)\r\n sizer.Add(lblCtrl, 0, 0)\r\n sizer.Add(choice, 0, 0)\r\n return (lblCtrl, choice)", "def test_choice_field(self):\n field = UserField(name=\"test_choice\", container=\"choice\").initialize()\n field.tooltip = \"Please help me !!!\"\n field.validate_configuration()\n field.configuration = {'last_index': 1,\n 'choices': [[1, 'Choice 1']]}\n field.validate_configuration()\n dictionary = field.to_dict()\n self.assertEquals(str(field.uuid), dictionary['id'])\n self.assertEquals(field.name, dictionary['name'])\n self.assertEquals(field.container, dictionary['container'])\n self.assertEquals(field.tooltip, dictionary['tooltip'])\n self.assertEquals(field.configuration, dictionary['configuration'])", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def test_generate_choice_input(self):\n \n\n label_to_use=str(self.test_option_choice).split('/')[-1].strip('--')\n obs=generate_choice_input('make_3d_plots','Test choice',\n 'Test choice',self.test_option_choice,\n label_to_use)\n \n exp='<tr><th>Test choice&nbsp;Test choice</th>' +\\\n '<td><select id=\"make_3d_plots:background_color\">\\n'+\\\n '<option selected>black\\n<option>white\\n</select></td></tr>\\n'\n \n self.assertEqual(obs,exp)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n 
choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def add_choice(self, name, value):\r\n self.choices += [{\"name\": name, \"value\": value}]", "def selection_field_vocab(context, widget, data):\n return [\n ('opt_1', _('opt_1', default=u'Option 1')),\n ('opt_2', _('opt_2', default=u'Option 2')),\n ('opt_3', _('opt_3', default=u'Option 3'))\n ]", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def create_dummy_form(title,text,fill_choice=[],choice_length=[]):\n # fill it with blank for dummy choices\n count=0\n choices=[]\n while count < 8:\n choices.append(None)\n count+=1\n \n # fill choices based on value on fill_choice\n for i in fill_choice:\n try :\n length = choice_length[i]\n except IndexError :\n length = 10\n choices[i] = create_random_string(length)\n\n dummy_form=CreatePollQuestion(\n {\"question_title\":title,\n \"question_text\" :text,\n \"choice_1\":choices[0],\n \"choice_2\":choices[1],\n \"choice_3\":choices[2],\n \"choice_4\":choices[3],\n \"choice_5\":choices[4],\n \"choice_6\":choices[5],\n \"choice_7\":choices[6],\n \"choice_8\":choices[7],\n })\n\n return dummy_form", "def test_distribution_choices_added_successfully(self):\n valid_choices = [\"cpu\", \"memory\"]\n for good_input in valid_choices:\n self.ocp_data[\"distribution\"] = good_input\n self.assertEqual(self.ocp_data[\"distribution\"], good_input)\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)", "def seed(request):\n samples_path = path.join(path.dirname(__file__), 'samples.json')\n with open(samples_path, 'r') as samples_file:\n samples_polls = json.load(samples_file)\n\n for sample_poll in samples_polls:\n poll = Poll()\n poll.text = sample_poll['text']\n poll.pub_date = timezone.now()\n poll.save()\n\n for sample_choice in sample_poll['choices']:\n choice = Choice()\n choice.poll = poll\n choice.text = sample_choice\n choice.votes = 0\n choice.save()\n\n return HttpResponseRedirect(reverse('app:home'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create data that use Answer model
def create_answer(question, user): return Answer.objects.create(question=question,answered_by=user)
[ "def __create_answering_data(self, answer):\n return {\"is_same\": answer, \"is_skipped\": False, \"_input_value_names\": \"\"}", "def create_answer(self, answer_form):\n return # osid.assessment.Answer", "def generate_questions(self):", "def create_quiz():\n user = User.objects.create(username='test', password='hello', is_active=True)\n quiz = Quiz.objects.create(subject='math', author=user, instructions='test instructions', description='test description')\n question1 = Question.objects.create(quiz=quiz, body='The first question')\n question2 = Question.objects.create(quiz=quiz, body='The second question')\n question3 = Question.objects.create(quiz=quiz, body='The third question')\n answer11 = Answer.objects.create(body='correct', point_value=3, question=question1)\n answer12 = Answer.objects.create(body='semi-correct', point_value=2, question=question1)\n answer13 = Answer.objects.create(body='wrong', point_value=1, question=question1)\n answer21 = Answer.objects.create(body='correct', point_value=3, question=question2)\n answer22 = Answer.objects.create(body='correct', point_value=2, question=question2)\n answer23 = Answer.objects.create(body='correct', point_value=1, question=question2)\n answer31 = Answer.objects.create(body='correct', point_value=3, question=question3)\n answer32 = Answer.objects.create(body='correct', point_value=2, question=question3)\n answer33 = Answer.objects.create(body='correct', point_value=1, question=question3)\n questions = [question1, question2, question3]\n answers = [answer11, answer12, answer13, answer21, answer22, answer23,\n answer31, answer32, answer33]\n return [user, quiz, questions, answers]", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }", "def _create_response_model(self, data):\n pass", "def create(self, request, format=None):\n serializer = QuestionAnswerSerializer(data=request.data)\n\n if serializer.is_valid():\n topic_id = serializer.validated_data['topic']\n question_text = serializer.validated_data['question']\n list_of_answer_text = serializer.validated_data['answers']\n list_of_wrong_answer_text = serializer.validated_data.get('wrong_answers') or []\n\n try:\n # Get the relevant topic.\n topic = self.topic_queryset().get(id=topic_id)\n except Exception as e:\n # Topic does not exist.\n return Response({\"error_description\": \"Topic Does Not Exist\"}, status=status.HTTP_400_BAD_REQUEST)\n\n user = self.request.user\n list_of_answer_instances = []\n for answer_text in list_of_answer_text:\n if self.answer_queryset().filter(text=answer_text):\n # If the answer already exists, for this user, we will use this answer.\n list_of_answer_instances.append(self.answer_queryset().get(text=answer_text))\n else:\n # Otherwise, we create a new answer object.\n new_answer = Answer.objects.create(text=answer_text, creator=user)\n list_of_answer_instances.append(new_answer)\n\n # Get wrong answers\n list_of_wrong_answer_instances = []\n for answer_text in list_of_wrong_answer_text:\n if self.answer_queryset().filter(text=answer_text):\n # If the answer already exists, for this user, we will use this answer.\n list_of_wrong_answer_instances.append(self.answer_queryset().get(text=answer_text))\n else:\n # Otherwise, we create a new answer object.\n new_answer = Answer.objects.create(text=answer_text, creator=user)\n 
list_of_wrong_answer_instances.append(new_answer)\n\n # If the question already exists in the database, we will just add the topic and the answers to this question\n if self.question_queryset().filter(text=question_text):\n question = Question.objects.get(text=question_text)\n question.topic.add(topic)\n question.answers.add(*list_of_answer_instances)\n question.wrong_answers.add(*list_of_wrong_answer_instances)\n else:\n # Otherwise, create a new question then add to the database.\n question = Question.objects.create(text=question_text, creator=user)\n question.topic.add(topic)\n question.answers.add(*list_of_answer_instances)\n question.wrong_answers.add(*list_of_wrong_answer_instances)\n\n # Save the question to the database.\n question.save()\n\n response_dict = {\n 'topic': topic.id,\n 'question_id': question.id,\n 'question_text': question.text,\n 'answers': [],\n 'wrong_answers': [],\n }\n\n for answer in question.answers.all():\n answer_dict = {\n 'answer_id': answer.id,\n 'answer_text': answer.text,\n 'correct': True\n }\n response_dict['answers'].append(answer_dict)\n\n for answer in question.wrong_answers.all():\n wrong_answer_dict = {\n 'answer_id': answer.id,\n 'answer_text': answer.text,\n 'correct': False\n }\n response_dict['wrong_answers'].append(wrong_answer_dict)\n\n return Response(response_dict, status=status.HTTP_201_CREATED)\n\n # Compile all errors into an error_description\n error_description_list = []\n for key, _ in serializer.errors.items():\n if key == 'question':\n error_description_list.append(\"Question cannot be blank.\")\n if key == 'answers':\n error_description_list.append(\"Answers cannot be blank.\")\n\n error_description = \" \".join(error_description_list)\n\n return Response({\"error_description\": error_description}, status=status.HTTP_400_BAD_REQUEST)", "def __init__(self):\n self.answers = []", "def get_question_template() -> object:\n return {\"question id\": \"\",\n \"category\": \"\",\n \"type\": \"\",\n \"info\": \"\",\n \"data\": {},\n \"user answer\": {},\n \"user score\": 0,\n \"full score\": 0\n }\n pass", "def _create_answers_dict(self, answers_json_path):\n\n # There are no answers in the test dataset\n if self.dataset_type == DatasetType.TEST and not self.val_test_split:\n return {}\n\n print('Loading VQA answers data from ->', answers_json_path)\n answers_json = json.load(open(answers_json_path))\n\n # Please note that answer_id's for the 10 human rater answers are not unique across all the answers.\n # They are only unique for the particular question. However, we are only using these values for \n # post-analysis, so we only record the corresponding list of 10 strings. 
Note that the dataset also includes\n # a self-reported \"confidence\" (yes/maybe/no) for each one; we are not currently using that information.\n\n # keep the official label (`multiple_choice_answer`) and also all 10 human ratings\n answers = {}\n example = 0\n for annotation in answers_json['annotations']:\n rater_annotations = []\n for rater_responses in annotation['answers']:\n rater_annotations.append(process_answer(rater_responses.get('answer', None)))\n if example < 5:\n print('Sample rater annotations:', rater_annotations)\n example += 1 \n next_answer = Answer(answer_id=annotation['question_id'] * 10,\n answer_str=process_answer(annotation['multiple_choice_answer']),\n question_id=annotation['question_id'],\n image_id=annotation['image_id'],\n question_type=annotation['question_type'],\n answer_type=annotation['answer_type'],\n annotations=rater_annotations,\n n_answer_classes=self.n_answer_classes)\n answers[annotation['question_id'] * 10] = next_answer\n\n return answers", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def create_answers(self, answer_list):\n for i in range(0, len(answer_list)):\n self.valid_answers[answer_list[i]] = i", "def create(self, validated_data):\n # TO DO: Calculate if given answer is correct or not.\n try:\n points = QuestionAttemptPoint.objects.all()[0]\n except:\n points = None\n try:\n question = Question.objects.get(\n id=int(validated_data['question_id'])\n )\n except Question.DoesNotExist:\n raise Http404\n point = 0\n total_points = 0\n if points:\n total_points = points.first\n if question.correct.lower() == validated_data['given_answer'].lower():\n is_correct = True\n if points:\n attempt_count = int(validated_data['attempt_count'])\n if attempt_count == 1:\n point = points.first\n elif attempt_count == 2:\n point = points.second\n elif attempt_count == 3:\n point = points.third\n else:\n point = points.fourth\n else:\n is_correct = False\n try:\n quiz = Quiz.objects.get(\n id=int(validated_data['quiz_id'])\n )\n except Quiz.DoesNotExist:\n raise Http404\n try:\n # get attempt if by any chance already exists\n question_attempt = QuestionAttempt.objects.get(\n quiz=quiz, question=question\n )\n question_attempt.point = point\n question_attempt.given_answer = validated_data['given_answer']\n question_attempt.is_correct = is_correct\n question_attempt.save()\n except QuestionAttempt.DoesNotExist:\n question_attempt = QuestionAttempt(\n question=question,\n given_answer=validated_data['given_answer'],\n is_correct=is_correct,\n attempted=True,\n point=point,\n quiz=quiz\n )\n question_attempt.save()\n\n # save quiz result\n quiz_result, created = QuizResult.objects.get_or_create(\n quiz=quiz\n )\n all_question_attempts = QuestionAttempt.objects.filter(\n quiz=quiz\n )\n full_marks = total_points * len(quiz.questions.all())\n obtained_marks = sum([i.point for i in all_question_attempts])\n quiz_result.total_score = full_marks\n 
quiz_result.obtained_score = obtained_marks\n quiz_result.save()\n\n data = {\n 'correct': is_correct,\n 'correct_option': question.correct,\n 'point': point,\n 'total_points': total_points\n }\n data.update(validated_data)\n return data", "def create_exam():\n exam = Exam.objects.create(name='Test Exam',\n kind = ExamKind.CI,\n stage = ExamStage.DEV,\n description='an exam for testing')\n concept_type = ContentType.objects.get_for_model(Concept)\n concept = Concept.objects.get(name = \"Concept A\")\n FreeResponseQuestion.objects.create(exam=exam,\n question=\"What is the answer to this FR question?\",\n number=1,\n content_type=concept_type,\n object_id=concept.id)\n concept = Concept.objects.get(name = \"Concept B\")\n mcq = MultipleChoiceQuestion.objects.create(exam=exam,\n question=\"What is the answer to this MC question?\",\n number=2,\n content_type=concept_type,\n object_id=concept.id)\n MultipleChoiceOption.objects.create(question=mcq, text=\"choice 1\", index=1, is_correct=True)\n MultipleChoiceOption.objects.create(question=mcq, text=\"choice 2\", index=2)\n MultipleChoiceOption.objects.create(question=mcq, text=\"choice 3\", index=3)\n return exam", "def create_answerset(trainqa_path, answerset_path):\n train_qa = pd.read_json(trainqa_path)\n # print(train_qa)\n answer_freq = train_qa['answer'].value_counts()\n answer_freq = DataFrame(answer_freq.iloc[0:2000])\n answer_freq.to_csv(answerset_path, columns=[], header=False)", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def prepare(self, answer_one_hot_mapping, tokenizer=None):\n\n # if this isn't a training dataset, the answer one hot indices are expected to be available\n if (self.dataset_type != DatasetType.TRAIN):\n assert(answer_one_hot_mapping != None)\n\n # Load Questions and Answers\n questions = self._create_questions_dict(self.questions_path)\n print('Questions dict created. Num entries: {}'.format(len(questions))) \n \n # Add complementary pairs data (only exists for VQA v2 dataset)\n if self.dataset == 'v2':\n questions = self._set_question_complements(questions, self.pairs_path)\n\n answers = self._create_answers_dict(self.answers_path)\n print('Answers dict created. 
Num entries: {}'.format(len(answers)))\n \n # Load Image IDs from the embeddings file\n image_ids = self._get_image_ids(self.image_features_path)\n images = self._create_images_dict(image_ids)\n print('Images dict created for {} embeddings'.format(self.image_embed_model))\n\n # We only keep the n_answer_classes choices for answers as this\n # is formulated as a classification problem\n answers = self._encode_answers(answers, answer_one_hot_mapping)\n\n # Initialize Tokenizer and GloVe matrix\n if tokenizer is not None:\n self.tokenizer = tokenizer\n print('Using the Tokenizer that was provided by the training set.')\n if self.sent_init_type == 'glove':\n self._init_tokenizer(questions, answers, build_glove_matrix=True)\n print('Tokenizer trained and GloVe matrix built...')\n else:\n self._init_tokenizer(questions, answers)\n print('Tokenizer trained...')\n \n max_len = 0 # To compute the maximum question length\n # Tokenize and encode questions and answers\n example = 0\n for _, question in questions.items():\n if example < 5:\n print('Sample question string to tokenize: ', question.question_str)\n print('- corresponding token sequence: ', question.tokenize(self.tokenizer,self.need_pos_tags))\n else:\n question.tokenize(self.tokenizer,self.need_pos_tags)\n example += 1\n # Get the maximum question length\n if question.get_tokens_length() > max_len:\n max_len = question.get_tokens_length()\n print('Questions tokenized...')\n\n # If the question max len has not been set in options file, assign to the\n # maximum question length in the dataset\n if not self.max_sentence_len:\n self.max_sentence_len = max_len\n print('Actual max sentence length: {}'.format(max_len))\n print('Model uses max sentence length: {}'.format(self.max_sentence_len))\n \n for _, answer in answers.items():\n answer.tokenize(self.tokenizer)\n print('Answers tokenized...')\n\n self._create_samples(images, questions, answers)\n\n print('\\nSample Questions -> Answers')\n print('---------------------------')\n _, ques_strings, _, _, _, _, ans_strings, _, _, _ = self.get_qa_lists()\n for q, a in zip(ques_strings[:20], ans_strings[:20]):\n print('{} -> {} '.format(q, a))", "def _bind_answers(question_feedback, list_str_answers: list) -> None:\n for str_answer in list_str_answers:\n QuestionFeedbackAnswer.objects.create(question_feedback=question_feedback, content=str_answer)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
same as create_user but using user manager
def create_user_using_manager(username,password): manager = UserManager() return manager.create_user(username=username, password=password)
[ "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_normal_user(self, **kwargs):\n return self.UserModel.objects.create_user(\n **kwargs\n )", "def create_user(user):\n create_edx_user(user)\n create_edx_auth_token(user)", "def sample_user(**params):\n return get_user_model().objects.create_user(**params)", "def users_create():", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = self.seed(User, properties=properties)\n return self.user", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_superuser(self, *args, **kwargs):\n self.create_user(*args, **kwargs)", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def sample_user(email='test@tullyapp.com', password='password'):\n return get_user_model().objects.create_user(email, password)", "def post(self):\r\n return create_user(request)", "def test_create_user(self):\n pass", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def test_create_system_user(self):\n pass", "def create_user(user: User):\n db_session.add(user)\n db_session.commit()\n return user.id", "def create_normal_user(sender, instance, created, **kwargs):\n\n if created:\n NormalUser.objects.create(user=instance)", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def create_user_and_auth_token(self):\n # check if role exists\n self.base_role = Role.objects.filter(name='User').first()\n if self.base_role is None:\n self.base_role = Role.objects.create(name='User')\n\n self.auth_user = User.objects.create(username='123', password='123', email='123@abv.bg', score=123, role=self.base_role)\n self.auth_token = 'Token {}'.format(self.auth_user.auth_token.key)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
populate question object with random string and user
def populate_poll(user="",total=10): user_list = None #create random user only when user argument empty if user == "": create_random_user(20) user_list = User.objects.all() for i in range(total): Question.objects.create( created_by=random.choice(user_list) if user_list is not None else user, title=create_random_string(seed_random(10)), text=create_random_string(seed_random(300)), slug=create_random_string(seed_random(100)) )
[ "def create_random_question(username):\n global questions\n user = users[username]\n available_questions_ids = list(questions.keys() - user[\"questions_asked\"])\n if len(questions) == 0:\n return None\n question_id = random.choice(available_questions_ids)\n question = questions[question_id]\n question_txt = question[\"question\"]\n question_possible_answers = question[\"answers\"]\n question = list()\n question.append(str(question_id))\n question.append(question_txt)\n question += question_possible_answers\n return chatlib.join_data(question)", "def generate_questions(self):", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }", "def notAFan_questions(user):\n questions = {\n 1: \"GBB: How old are you? \",\n 2: \"GBB: What do you like to do in your free time? \",\n 3: \"GBB: What is your ethnicity? \",\n 4: \"GBB: What did you eat for breakfast? \",\n 5: \"GBB: Are you an early bird or a night owl? \",\n 6: \"GBB: Do you like football? \"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Looks like I know you so well that I don't even need to ask you a question! Type anything to proceed.\"", "def get_question_template() -> object:\n return {\"question id\": \"\",\n \"category\": \"\",\n \"type\": \"\",\n \"info\": \"\",\n \"data\": {},\n \"user answer\": {},\n \"user score\": 0,\n \"full score\": 0\n }\n pass", "def personal_questions(user):\n questions = {\n 1: 'GBB: How long have you been a fan of the Packers?',\n 2: 'GBB: Why are you a fan of the Packers?',\n 3: \"GBB: What team do you hate the most?\",\n 4: \"GBB: Who's your favorite player on the Packers?\",\n 5: \"GBB: Who's your least favorite player on the Packers?\",\n 6: \"GBB: Do you come from a family of Packer fans, or are you a lone ranger?\"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Look's like I know you so well that I don't even need to ask you a question!\"", "def SendNewQuestionToUser(self, user_data, opt_message=None):\n if user_data is not None:\n if not user_data.available_questions:\n question_id = -1\n else:\n question_id = random.choice(user_data.available_questions)\n else:\n question_id = random.randint(0, len(questions) - 1)\n\n if opt_message is None:\n message = {}\n else:\n message = opt_message\n\n if question_id >= 0:\n message['q'] = {\n 'id': question_id,\n 'q': questions[question_id]['q'],\n 'a': questions[question_id]['a'][:]\n }\n random.shuffle(message['q']['a'])\n else:\n message['c'] = True\n\n self.Send(message)", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer", "async def create_new_question(self):\n if self.idol_post_msg:\n await self.idol_post_msg.delete()\n if self.rounds >= self.max_rounds:\n if self.force_ended:\n return\n return await self.display_winners()\n self.idol = await ex.get_random_idol()\n if self.gender:\n while self.idol.gender != self.gender:\n self.idol = await ex.get_random_idol()\n self.correct_answers = []\n for alias in self.idol.aliases:\n self.correct_answers.append(alias.lower())\n 
self.correct_answers.append(self.idol.full_name.lower())\n self.correct_answers.append(self.idol.stage_name.lower())\n print(self.correct_answers)\n self.idol_post_msg, url = await ex.idol_post(self.channel, self.idol, user_id=self.host, guessing_game=True, scores=self.players)\n # add reaction here\n await self.check_message()", "def __init__(self, question, correct_answer):\n\n self.question = question\n self.correct_answer = correct_answer", "def get_question(self, user_state):\n\n if not user_state.current_session:\n\n # Create the new session\n new_session = user_state.create_session()\n # Create the new block for the session\n new_block = Block.objects.create(session=new_session)\n\n # Fill up the new block with random qandas\n while not new_block.is_full:\n QandA.objects.create(question=self.get_random(user_state), block=new_block)\n\n # Add the new block\n user_state.add_block(new_block)\n\n return user_state.current_session.current_block.get_question()", "def create_answer(question, user):\n return Answer.objects.create(question=question,answered_by=user)", "def quiz_mechanism(quiz_dict): \n answer, prompt_list = random.choice(list(quiz_dict.items()))\n prompt = random.choice(prompt_list)\n return answer, prompt", "def randomize_question(self):\n if len(self.final_questions_list) < self.num_questions:\n self.num_questions = len(self.final_questions_list)\n self.final_questions_list = sample(self.final_questions_list, self.num_questions)", "def choose_question():\r\n random_index_question = randint(1, question.num_question + 1)\r\n random_question = question.question[random_index_question]\r\n correct_answer = question.answer[random_index_question]\r\n return random_question, correct_answer", "def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)", "def generate_questions(self):\n p = Parser(self.question)\n self.cleaned_question = p.clean_question()", "def add_user_answer(self, question, guess, correct):\n user_answer = UserAnswer()\n user_answer.user = self.user\n user_answer.quiz = self.quiz\n user_answer.question = question\n user_answer.answer = guess\n user_answer.correct = correct\n user_answer.save()", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create CreatePollQuestion dummy form
def create_dummy_form(title,text,fill_choice=[],choice_length=[]): # fill it with blank for dummy choices count=0 choices=[] while count < 8: choices.append(None) count+=1 # fill choices based on value on fill_choice for i in fill_choice: try : length = choice_length[i] except IndexError : length = 10 choices[i] = create_random_string(length) dummy_form=CreatePollQuestion( {"question_title":title, "question_text" :text, "choice_1":choices[0], "choice_2":choices[1], "choice_3":choices[2], "choice_4":choices[3], "choice_5":choices[4], "choice_6":choices[5], "choice_7":choices[6], "choice_8":choices[7], }) return dummy_form
[ "def create_question(self, question_form):\n return # osid.assessment.Question", "def testPollAddQuestion(self):\n pass", "def __init__(self, radio_poll, *args, **kwargs):\n super(RadioPollChoiceForm, self).__init__(*args, **kwargs)\n choices = (((None, '----'),) +\n tuple(radio_poll.answers.values_list('id', 'answer')))\n self.fields['radio_poll__%s' % str(radio_poll.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=choices,\n label=radio_poll.question))", "def test_create_new_form(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n new_survey = SurveyForm.get(self.test_survey_name)\n assert new_survey is not None\n assert new_survey.form == self.test_form", "def get_question_form_for_create(self, item_id, question_record_types):\n return # osid.assessment.QuestionForm", "def create_poll(question, days):\n return Poll.objects.create(question=question,\n pub_date=timezone.now() + datetime.timedelta(days=days))", "def test_add_question(self):\n pass", "def test_create_single_poll_submission(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "async def create_new_question(self):\n if self.idol_post_msg:\n await self.idol_post_msg.delete()\n if self.rounds >= self.max_rounds:\n if self.force_ended:\n return\n return await self.display_winners()\n self.idol = await ex.get_random_idol()\n if self.gender:\n while self.idol.gender != self.gender:\n self.idol = await ex.get_random_idol()\n self.correct_answers = []\n for alias in self.idol.aliases:\n self.correct_answers.append(alias.lower())\n self.correct_answers.append(self.idol.full_name.lower())\n self.correct_answers.append(self.idol.stage_name.lower())\n print(self.correct_answers)\n self.idol_post_msg, url = await ex.idol_post(self.channel, self.idol, user_id=self.host, guessing_game=True, scores=self.players)\n # add reaction here\n await self.check_message()", "def create_poll(question, days, order=0):\n return Poll.objects.create(question=question,\n pub_date=timezone.now()\n + datetime.timedelta(days=days),\n order=order)", "def generate_questions(self):", "def generate_questions(self):\n p = Parser(self.question)\n self.cleaned_question = p.clean_question()", "def process_questionform(request):\n \n \n pass", "def __init__(self, *args, **kwargs):\n question = kwargs.pop('question_text', None)\n super(MeetingSignInForm, self).__init__(*args, **kwargs)\n if question:\n self.fields['quick_question'].label = question", "def make_form(self):", "def create_choices(question_model, text=\"text\", total_votes = 0):\n return Choice.objects.create(question=question_model, \n text=text, \n total_votes=total_votes)", "def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)", "def new_from_post():\n # If you make a post request with a question_id we will assume you want a new question editor\n # we will prepopulate the question new page with data from that question (if it is a valid question id)\n question_id = request.form['question_id'] if request.form['question_id'] else ''\n\n return render_template('questionNew.html', question_id=question_id)", "def new_question(self):\n self.__out__(self.__call_core__('g_new_question'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an array with WCS to altitude and azimuth coordinates
def getAltAz(arr,header,time,location): soln = wcs.WCS(header) coords = cartesian([arange(arr.shape[1]),arange(arr.shape[0])]) world = soln.wcs_pix2world(coords,0) radec = SkyCoord(ra=world[:,0],dec=world[:,1],frame='icrs',unit='deg') altaz = radec.transform_to(AltAz(obstime=time,location=telescope)) return altaz.alt.deg,altaz.az.deg,coords[:,0],coords[:,1]
[ "def getAltAzArray(ra, dec, timeArr):\n altitude = []\n azimuth = []\n for tt in timeArr:\n alt, az = findAltAz(ra, dec, jdT = tt) \n altitude.append(alt)\n azimuth.append(az)\n return np.array(altitude), np.array(azimuth)", "def AltAziConv(self): # Originally in CORRECT.PAS\n ObjRa = self.RaA / 54000.0\n ObjDec = self.DecA / 3600.0\n if ObjDec < -90:\n ObjDec = -89.9999999\n alt1 = sin(DegToRad(prefs.ObsLat))\n alt1 *= sin(DegToRad(ObjDec))\n co = cos(DegToRad(prefs.ObsLat))\n cd = cos(DegToRad(ObjDec))\n H = DegToRad((self.Time.LST - ObjRa) * 15.0)\n ct = cos(H)\n alt2 = co * cd * ct\n self.Alt = RadToDeg(asin(alt1 + alt2))\n self.Azi = RadToDeg(atan2(sin(H), ((cos(H) * sin(DegToRad(prefs.ObsLat))) - (tan(DegToRad(ObjDec)) * co))))\n self.Azi += 180.0 # algorithm counts azimuth from south!\n self.Azi = Reduce(self.Azi)", "def offset_to_altaz(xoff, yoff, azimuth, altitude):\n #Deal with situations where offset = 0?\n\n d = sqrt(xoff*xoff+yoff*yoff)\n pos = np.where(d==0)\n d=1e-12 * u.deg # add a very small offset to prevent math errors\n\n q = arctan(d.to(u.rad).value)\n\n sq = sin(q)\n xp1 = xoff * (sq/d)\n yp1 = yoff * (sq/d)\n zp1 = cos(q)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp0 = cx*xp1 - sx*zp1\n yp0 = yp1\n zp0 = sx*xp1 + cx*zp1\n\n obj_altitude = arcsin(zp0)\n obj_altitude[pos]=altitude\n obj_azimuth = arctan2(yp0,-xp0) + azimuth\n obj_azimuth[pos] = azimuth\n\n #if obj_azimuth.value < 0.:\n # obj_azimuth += 2.*pi\n #elif obj_azimuth.value >= (2.*pi ):\n # obj_azimuth -= 2.*pi\n\n return obj_altitude,obj_azimuth", "def horizontal_to_equatorial(latitude, lst, altitude, Azimuth):\n ha, dec = horizontal_to_hadec(latitude, altitude, Azimuth)\n# print repr(ha)+\",\"+repr(dec);\n ra = ha_to_ra(ha, lst)\n\n return ra, dec", "def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta", "def altaz(mjds, ra, dec, lat=core_lat):\n\n #compute hour angle in degrees\n ha = mjds2lst( mjds ) - ra\n if (ha < 0): ha = ha + 360\n\n #convert degrees to radians\n ha, dec, lat = list(map(math.radians, (ha, dec, lat)))\n\n #compute altitude in radians\n sin_alt = math.sin(dec)*math.sin(lat) + math.cos(dec)*math.cos(lat)*math.cos(ha)\n alt = math.asin(sin_alt)\n \n #compute azimuth in radians\n #divide by zero error at poles or if alt = 90 deg\n cos_az = (math.sin(dec) - math.sin(alt)*math.sin(lat))/(math.cos(alt)*math.cos(lat))\n az = math.acos(cos_az)\n\n #convert radians to degrees\n hrz_altitude, hrz_azimuth = list(map(math.degrees, (alt, az)))\n\n #choose hemisphere\n if (math.sin(ha) > 0): hrz_azimuth = 360 - hrz_azimuth;\n\n return hrz_altitude, hrz_azimuth", "def horizontal_to_zenithazimuth(altitude, Azimuth):\n\n return zenithazimuth_to_horizontal(altitude, Azimuth)", "def altaz_to_offset(obj_azimuth,obj_altitude,azimuth,altitude):\n\n daz = obj_azimuth - azimuth\n coa = cos(obj_altitude)\n\n xp0 = -cos(daz) * coa\n yp0 = sin(daz) * coa\n zp0 = sin(obj_altitude)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp1 = cx*xp0 + sx*zp0\n yp1 = yp0\n zp1 = -sx*xp0 + cx*zp0\n\n q = arccos(zp1)\n d = tan(q)\n alpha = 
arctan2(yp1,xp1)\n\n xoff = d * cos(alpha)\n yoff = d * sin(alpha)\n\n return xoff,yoff", "def get_azimuth(self):\n if self.packet_header[\"packet_code\"] == 28:\n azimuths = self.azimuths\n else:\n azimuths = [d[\"angle_start\"] * 0.1 for d in self.radial_headers]\n return np.array(azimuths, dtype=\"float32\")", "def wgs_to_ecef(cls, latitude, longitude, altitude):\n phi, lambda_, h = radians(latitude), radians(longitude), altitude\n N = cls.a / sqrt(1 - cls.e2 * sin(phi)**2)\n x = (N + h) * cos(phi) * cos(lambda_)\n y = (N + h) * cos(phi) * sin(lambda_)\n z = (N * (1 - cls.e2) + h) * sin(phi)\n return x, y, z", "def windvec(aux_wind):\r\n\r\n\r\n aux_wind = aux_wind.str.split('_')\r\n\r\n u = aux_wind.apply(lambda x: x[0])\r\n D = aux_wind.apply(lambda x: x[1])\r\n\r\n u = u.values\r\n D = D.values\r\n\r\n u = [float(i) for i in u]\r\n D = [float(i) for i in D]\r\n\r\n if u.__len__() == 0:\r\n return np.nan, np.nan\r\n\r\n # Test input array/value\r\n u, D = _arraytest(u,D)\r\n\r\n ve = 0.0 # define east component of wind speed\r\n vn = 0.0 # define north component of wind speed\r\n D = D * math.pi / 180.0 # convert wind direction degrees to radians\r\n for i in range(0, len(u)):\r\n ve = ve + u[i] * math.sin(D[i]) # calculate sum east speed components\r\n vn = vn + u[i] * math.cos(D[i]) # calculate sum north speed components\r\n ve = - ve / len(u) # determine average east speed component\r\n vn = - vn / len(u) # determine average north speed component\r\n uv = math.sqrt(ve * ve + vn * vn) # calculate wind speed vector magnitude\r\n # Calculate wind speed vector direction\r\n vdir = scipy.arctan2(ve, vn)\r\n vdir = vdir * 180.0 / math.pi # Convert radians to degrees\r\n if vdir < 180:\r\n Dv = vdir + 180.0\r\n else:\r\n if vdir > 180.0:\r\n Dv = vdir - 180\r\n else:\r\n Dv = vdir\r\n return uv, Dv # uv in m/s, Dv in dgerees from North\r", "def ecef2LatLonAlt(x, y, z):\n\n # Calculate the polar eccentricity\n ep = np.sqrt((EARTH.EQUATORIAL_RADIUS**2 - EARTH.POLAR_RADIUS**2)/(EARTH.POLAR_RADIUS**2))\n\n # Calculate the longitude\n lon = np.arctan2(y, x)\n\n p = np.sqrt(x**2 + y**2)\n\n theta = np.arctan2( z*EARTH.EQUATORIAL_RADIUS, p*EARTH.POLAR_RADIUS)\n\n # Calculate the latitude\n lat = np.arctan2(z + (ep**2)*EARTH.POLAR_RADIUS*np.sin(theta)**3, \\\n p - (EARTH.E**2)*EARTH.EQUATORIAL_RADIUS*np.cos(theta)**3)\n\n # Get distance from Earth centre to the position given by geographical coordinates, in WGS84\n N = EARTH.EQUATORIAL_RADIUS/math.sqrt(1.0 - (EARTH.E**2)*math.sin(lat)**2)\n\n \n # Calculate the height in meters\n\n # Correct for numerical instability in altitude near exact poles (and make sure cos(lat) is not 0!)\n if((np.abs(x) < 1000) and (np.abs(y) < 1000)):\n alt = np.abs(z) - EARTH.POLAR_RADIUS\n\n else:\n # Calculate altitude anywhere else\n alt = p/np.cos(lat) - N\n\n\n return lat, lon, alt", "def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n 
grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array", "def zenithazimuth_to_horizontal(zenith, azimuth):\n# altitude = norm_angle(pi / 2. - zenith)\n# Azimuth = norm_angle(pi / 2. - azimuth)\n altitude = norm_angle_copy(pi / 2. - zenith)\n Azimuth = norm_angle_copy(pi / 2. - azimuth)\n\n return altitude, Azimuth", "def get_altitude(points):\n altitudes = np.zeros((len(points),), dtype=\"float64\")\n for i, point in tqdm(enumerate(points), desc=\"GETTING ALTITUDE\"):\n p = Point(point[0], point[1])\n altitudes[i] = alt.NM_COTA.iloc[\n np.argmin([p.distance(alt.geometry.iloc[j]) for j in range(alt.shape[0])])\n ]\n return altitudes", "def altaz_to_xy(alt, az):\n # In case you pass in lists\n alt = np.asarray(alt)\n az = np.asarray(az)\n\n # Reverse of r interpolation\n r = np.interp(90 - alt, xp=theta_sw, fp=r_sw)\n az = az + 0.1 # Camera rotated 0.1 degrees.\n\n # Angle measured from vertical so sin and cos are swapped from usual polar.\n # These are x,ys with respect to a zero.\n x = -1 * r * np.sin(np.radians(az))\n y = r * np.cos(np.radians(az))\n\n # y is measured from the top!\n center = (512, 512)\n x = x + center[0]\n y = center[1] - y\n\n # Spacewatch camera isn't perfectly aligned, true zenith is 2 to the right\n # and 3 down from center.\n x += 2\n y += 3\n\n return (x, y)", "def find_orientations(satellite_centers):\n one_cutout_no = len(satellite_centers)\n all_thetas = np.zeros((one_cutout_no, 2))\n half = int(ellipse_cutout_size/2)\n\n for k in range(0, one_cutout_no):\n x, y = satellite_centers[k]\n im = objects[int(y) - half:int(y) + half, int(x) - half:int(x) + half]\n\n # find the minimum and maximum orientation from the ellipses found by hough transform\n min_th, max_th = hough_ellipse_orientation(im)\n # find the orientation from image moments\n _, _, theta_moment, _, _ = find_ellipse_parameters(im)\n theta_moment = adjust_angle(-np.pi / 2 - theta_moment)\n\n # if the orientation calculated from image moments is not too different from the min / max angles from hough\n # transform,\n if not np.abs(theta_moment - min_th) > np.pi / 6 or not np.abs(theta_moment - max_th) > np.pi / 6:\n # update minimum and maximum orientations as:\n min_th, max_th = min(min_th, max_th, theta_moment), max(min_th, max_th, theta_moment)\n\n all_thetas[k] = min_th, max_th\n\n print('Orientations calculated.')\n return all_thetas", "def get_azimuth (\r\n xlon: str | ArrayLike, \r\n ylat: str| ArrayLike, \r\n *, \r\n data: DataFrame =None, \r\n utm_zone:str=None, \r\n projection:str='ll', \r\n isdeg:bool=True, \r\n mode:str='soft', \r\n extrapolate:bool =...,\r\n view:bool=..., \r\n ):\r\n from ..site import Location \r\n \r\n mode = str(mode).lower() \r\n projection= str(projection).lower()\r\n extrapolate, view = ellipsis2false (extrapolate, view)\r\n\r\n xlon , ylat = assert_xy_in(xlon , ylat , data = data )\r\n \r\n if ( \r\n xlon.max() > 180. 
and ylat.max() > 90. \r\n and projection=='ll' \r\n and mode=='soft'\r\n ): \r\n warnings.warn(\"xlon and ylat arguments are greater than 180 degrees.\"\r\n \" we assume the coordinates are UTM. Set explicitly\"\r\n \" projection to ``UTM`` to avoid this warning.\")\r\n projection='utm'\r\n \r\n if projection=='utm':\r\n if utm_zone is None: \r\n raise TypeError (\"utm_zone cannot be None when projection is UTM.\")\r\n \r\n ylat , xlon = Location.to_latlon_in(\r\n xlon, ylat, utm_zone= utm_zone)\r\n \r\n if len(xlon) ==1 or len(ylat)==1: \r\n msg = \"Azimuth computation expects at least two points. Got 1\"\r\n if mode=='soft': \r\n warnings.warn(msg) \r\n return 0. \r\n \r\n raise TypeError(msg )\r\n # convert to radian \r\n if isdeg: \r\n xlon = np.deg2rad (xlon ) ; ylat = np.deg2rad ( ylat)\r\n \r\n dx = map (lambda ii: np.cos ( ylat[ii]) * np.sin( ylat [ii+1 ]) - \r\n np.sin(ylat[ii]) * np.cos( ylat[ii+1]) * np.cos (xlon[ii+1]- xlon[ii]), \r\n range (len(xlon)-1)\r\n )\r\n dy = map( lambda ii: np.cos (ylat[ii+1])* np.sin( xlon[ii+1]- xlon[ii]), \r\n range ( len(xlon)-1)\r\n )\r\n # to deg \r\n z = np.around ( np.rad2deg ( np.arctan2(list(dx) , list(dy) ) ), 3) \r\n azim = z.copy() \r\n if extrapolate: \r\n # use mean azimum of the total area zone and \r\n # recompute the position by interpolation \r\n azim = np.hstack ( ( [z.mean(), z ]))\r\n # reset the interpolare value at the first position\r\n with warnings.catch_warnings():\r\n #warnings.filterwarnings(action='ignore', category=OptimizeWarning)\r\n warnings.simplefilter(\"ignore\")\r\n azim [0] = scalePosition(azim )[0][0] \r\n \r\n if view: \r\n x = np.arange ( len(azim )) \r\n fig, ax = plt.subplots (1, 1, figsize = (10, 4))\r\n # add Nan to the first position of z \r\n z = np.hstack (([np.nan], z )) if extrapolate else z \r\n \r\n ax.plot (x, \r\n azim, \r\n c='#0A4CEE',\r\n marker = 'o', \r\n label ='extra-azimuth'\r\n ) \r\n \r\n ax.plot (x, \r\n z, \r\n 'ok-', \r\n label ='raw azimuth'\r\n )\r\n ax.legend ( ) \r\n ax.set_xlabel ('x')\r\n ax.set_ylabel ('y') \r\n\r\n return azim", "def get_azimuth(self, block):\n\n offset = self.__offset[block]\n # change byte order\n azi = ord(self.__dataset[offset + 2:offset + 3]) + \\\n (ord(self.__dataset[offset + 3:offset + 4]) << 8)\n azi /= 100.0\n return azi" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rotates the ADP of 'atom' to match the orientation of 'source_atom.
def rotate_3D(atom, source_atom): from lauescript.cryst.match import get_transform lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]] lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]] matrix = get_transform(lst1, lst2, matrix=True) adp = source_atom.adp['cart_int'] atom.adp['cart_int'] = rotate_adp(adp, matrix)
[ "def orient_to_source(self):\n self.heading = self.start_horizontal.az - (90 * degree)\n self.heading = self.heading.to('degree')", "def assign_rotating_atoms(atom1,atom2,atoms): \n atomsToRotate = [atom2]\n for atom in atomsToRotate:\n atom.rotate = True\n for connectedAtomIndex in atom.connectivity:\n connectedAtom = atoms[connectedAtomIndex]\n if ((connectedAtom not in atomsToRotate) and\n (connectedAtom != atom1)):\n connectedAtom.rotate = True\n atomsToRotate.append(connectedAtom)", "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def _rotate(self):\n self.image = pg.transform.rotate(self.image, (self._attr[\"direction\"] - self._attr[\"reference\"]))\n self._attr[\"reference\"] = self._attr[\"direction\"]", "def rotate_atoms(atom_list,theta,axis,ref_pt):\n\n import numpy\n from numpy import dot as dot\n from numpy import cross as cross\n from math import cos as cos\n from math import sin as sin\n\n new_atom_list = []\n for atome in atom_list:\n new_atom = atom()\n\n # first we compute the relative vector\n v_rel = numpy.array([atome.x,atome.y,atome.z]) - ref_pt\n\n # then we rotate\n v_rot = dot(v_rel,axis)*axis+cos(theta)*(v_rel-dot(v_rel,axis)*axis)+ \\\n sin(theta)*cross(axis,v_rel)\n\n # then we reintroduce the absolute coordinates\n v_final = v_rot + ref_pt\n\n # now we update the atom coordinates\n new_atom.setsymbol(atome.symbol)\n new_atom.setx(v_final[0])\n new_atom.sety(v_final[1])\n new_atom.setz(v_final[2])\n\n # adding the new wf to the final list\n new_atom_list.append(new_atom)\n\n return new_atom_list", "def _align_with_x(self):\n\n v = np.array([self.LC_positions[self.lineatoms[0], :2] - self.LC_positions[self.lineatoms[1], :2]])\n angle = np.arctan2(v[0, 0, 1], v[0, 0, 0])\n self.LC_positions = transform.rotate_coords_z(self.LC_positions, - angle * 180 / np.pi)", "def alignment_att(alpha, p, treatment):\n assert p.shape[0] == treatment.shape[0]\n adj = alpha * (1 - treatment)\n return adj", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def setAzEncoderOffset(ant) :\n \n # Retrieve current azimuth offset (arcmin), elevation (degrees) \n # and az encoder pointing offset (implementation specific).\n\n azOffMpName = \"Control.Antenna%d.azimuthOffset\"%ant\n pointingConstants = pointingSetup( ant )\n\n if device.CarmaAnt().isOvro(ant):\n actualElMpName = \"Ovro%d.AntennaCommon.Drive.Track.actualElevation\"%ant\n elif device.CarmaAnt().isBima(ant): 
\n bimaAntNo = ant - 6\n actualElMpName = \"Bima%d.AntennaCommon.Drive.Track.actualElevation\"%bimaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n elif device.CarmaAnt().isSza(ant): \n szaAntNo = ant - 15\n actualElMpName = \"Sza%d.AntennaCommon.Drive.Track.actualElevation\"%szaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n else:\n raise Exception, \"Invalid ant\"\n\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n cosEl = math.cos( actualEl * math.pi / 180.0 )\n\n if device.CarmaAnt().isOvro(ant):\n pointingConstants[0] = pointingConstants[0] + azOffset/cosEl\n ovroMountPointingConstants( pointingConstants[0],\n pointingConstants[1],\n pointingConstants[2],\n pointingConstants[3],\n pointingConstants[4], ant )\n elif device.CarmaAnt().isBima(ant): \n pointingConstants[0][0] = pointingConstants[0][0] + azOffset/cosEl\n bimaMountPointingConstants( pointingConstants[0], pointingConstants[1], ant )\n elif device.CarmaAnt().isSza(ant): \n # For SZA, the az zero (term 7 in the pointing constants) is in degrees \n pointingConstants[6] += ( ( azOffset/cosEl ) / 60.0 );\n # Avoid having to spell out all 19 arguments by using the special \n # form '*args' with a list of ordered args.\n args = pointingConstants \n args.append( ant )\n szaMountPointingConstants( *args )\n else:\n raise Exception, \"Invalid ant\"\n\n return offset(0, 0, ant)", "def rotation_pt(self, params):\n return self._rotation(self._convertparams(params, self.arclentoparam_pt))", "def addOffsetRotation(self, point):\n\n ox, oy, oz = OpenMaya.MVector(0.0, 0.0, 0.0)\n px, py, pz = point\n\n # Z Rotation\n if self.offset_rotation.z != 0.0:\n point = self.rotateZ(point)\n\n # Y Rotation\n if self.offset_rotation.y != 0.0:\n point = self.rotateY(point)\n\n # X Rotation\n if self.offset_rotation.x != 0.0:\n point = self.rotateX(point)\n\n return point", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))", "def toAnp(P):\n P = copy.deepcopy(P)\n # Remove all precharges:\n removePrecharges(P)\n\n \"\"\"Chop trailing NOPs\"\"\"\n\n # Find the first rd/wr in the NA* schedule\n # This ensures we compare with the right bank when executing earliest()\n smallestRdWrCmd = findFirstRdWr(P)\n\n # Find the earliest possible location of a next RD/WR after this pattern.\n newLen = earliest(smallestRdWrCmd, P)\n return RawPattern(newLen, P, P)", "def move_to_angle(alpha, theta, pan_pos = 0, tilt_pos = 0, base_rate = 500, object_distance_hint = -1):\n\n if abs(alpha) > absolute_pan_limit or abs(theta) > absolute_tilt_limit:\n return (-1, -1)\n\n # First calculate pan movement\n # TODO Account for displacement perpendicular to pan axis.\n # Similar calculation to tilt displacement but will have\n # to take into account left or right of axis.\n pan_steps = int(alpha / pan_deg_per_step) - pan_pos\n\n # Calculate compensation for sensor displacement\n # if object distance hint is specified.\n theta_comp_deg = 0.0\n\n if object_distance_hint > 0:\n # Cannot look \"back\"\n if object_distance_hint < sensor_displacement:\n return (-1, -1, 0, 0)\n # Compute angle compensation and compare to system's step resolution.\n # No need to bother correcting an angle that the motors cannot reach.\n angle_sensitivity = deg_per_step / gear_ratio / micro_steps\n theta_comp = math.asin(sensor_displacement / 
object_distance_hint)\n theta_comp_deg = theta_comp * 180.0 / math.pi\n #print(f'sensitivity={angle_sensitivity}, comp={theta_comp}[rad]/{theta_comp_deg}[deg]')\n if theta_comp_deg < angle_sensitivity:\n theta_comp_deg = 0.0\n\n # Calculate tilt movement\n tilt_steps = pan_steps + (int(round((theta - theta_comp_deg) / tilt_deg_per_step)) - tilt_pos)\n\n # Calculate relative step rate per motor and output as list\n max_delta = max(abs(pan_steps), abs(tilt_steps))\n\n if max_delta > 0:\n return (abs(pan_steps), abs(tilt_steps), int(round(base_rate * pan_steps / max_delta)), int(round(base_rate * tilt_steps / max_delta)))\n else:\n return (-1, -1, 0, 0)", "def fromRotation(angle, dir):\n return Affine3(xform.rotation_matrix(math.radians(angle), dir))", "def rotate_atoms(torsion,atoms):\n #Set up easily read variables\n atom1Coord = np.array([torsion.tor_atoms[1].x,\n torsion.tor_atoms[1].y,\n torsion.tor_atoms[1].z])\n atom2Coord = np.array([torsion.tor_atoms[2].x,\n torsion.tor_atoms[2].y,\n torsion.tor_atoms[2].z])\n rotPhi = torsion.newAngle + torsion.currentAngle()\n rotationVector = atom2Coord - atom1Coord\n\n #Create Final Rotation Matrix\n YZRotMat = prepare_rotation_matrix(rotationVector)\n ZRotMat=np.matrix([[np.cos(rotPhi),-np.sin(rotPhi),0.0],\n [np.sin(rotPhi),np.cos(rotPhi),0.0],\n [0.0,0.0,1.0]])\n rotationMatrix = np.dot(YZRotMat.T,np.dot(ZRotMat,YZRotMat))\n \n #Perform Rotation\n for atom in atoms:\n if atom.rotate:\n atomToRotateCoord = np.array([atom.x,atom.y,atom.z])\n rotationVector = np.matrix(atomToRotateCoord - atom1Coord)\n atomToRotateCoord = np.dot(rotationVector,rotationMatrix.T) \n atomToRotateCoord = atomToRotateCoord + atom1Coord\n atom.x = float(atomToRotateCoord[[0],[0]])\n atom.y = float(atomToRotateCoord[[0],[1]])\n atom.z = float(atomToRotateCoord[[0],[2]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the measured ADP from the xd.res file. The parameters are stored in atom.adp['frac_meas'] and atom.adp['cart_meas'].
def read_meas_adp(data, path='xd.res', use='meas'):
    use2 = 'frac_' + use
    switch = False
    filepointer = open(path, 'r')
    atomname = None
    for line in filepointer:
        if switch:
            split = [i for i in line.split(' ') if len(i) > 0]
            if not len(split) == 6:
                print('WARNING!!! Inconsistent number of floats while reading measured ADP.')
            data['exp'][atomname].adp[use2] = split
            switch = False
        if '(' in line:
            split = [i for i in line.split(' ') if len(i) > 0]
            if split[0][-1] == ')':
                switch = True
                atomname = split[0]
    use = 'cart_' + use
    for atom in data['exp'].atoms:
        # if use == 'cart_neut': print(atom)
        atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.frac2cartmatrix, atom.molecule.cell)
    return data
[ "def test_readPDAS(self):\n st = readPDAS(self.testfile)\n self.assertTrue(isinstance(st, Stream))\n self.assertTrue(len(st) == 1)\n tr = st[0]\n expected = [('COMMENT', 'GAINRANGED'),\n ('DATASET', 'P1246001108'),\n ('FILE_TYPE', 'LONG'),\n ('HORZ_UNITS', 'Sec'),\n ('SIGNAL', 'Channel1'),\n ('VERSION', 'next'),\n ('VERT_UNITS', 'Counts')]\n self.assertTrue(sorted(tr.stats.pop(\"pdas\").items()) == expected)\n expected = [('_format', 'PDAS'),\n (u'calib', 1.0),\n (u'channel', u''),\n (u'delta', 0.005),\n (u'endtime', UTCDateTime(1994, 4, 18, 0, 0, 2, 495000)),\n (u'location', u''),\n (u'network', u''),\n (u'npts', 500),\n (u'sampling_rate', 200.0),\n (u'starttime', UTCDateTime(1994, 4, 18, 0, 0)),\n (u'station', u'')]\n self.assertTrue(sorted(tr.stats.items()) == expected)\n expected = np.array([895, 867, 747, 591, 359, -129, -185, 3, 115, 243],\n dtype=np.int16)\n np.testing.assert_array_equal(tr.data[:10], expected)", "def readAD(self):\n\n fname = self.ad_file\n print \"reading ad file \", fname, \" curdir = \", os.getcwd()\n try:\n fh = open(fname,'r')\n self.lines_ad = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening {:}\\n\".format(fname))\n return 0\n\n for i in range(len(self.lines_ad)):\n ln = self.lines_ad[i].split() \n if (len(ln) >1):\n if (ln[1] == \"NumFoil\"):\n self.nSeg = int(ln[0])\n break\n if (ln[1] == \"WindFile\" and self.wind_file == None):\n self.wind_file = ln[0][1:-1]\n self.af_dict = {}\n self.af_dict['polar_idx'] = [0]*self.nSeg\n self.af_dict['polar_files'] = [0]*self.nSeg\n print \"ln, nSeg, i\", ln, self.nSeg, i\n for j in range(self.nSeg):\n lnidx = i+1+j\n ln = self.lines_ad[lnidx].split()\n afpath = fix_path(ln[0].strip().strip(\"\\\"\").strip(\"\\'\"))\n ln[0] = \"\\\"%s\\\"\" % afpath\n self.lines_ad[lnidx] = unsplit(ln)\n self.af_dict['polar_idx'][j] = j+1\n self.af_dict['polar_files'][j] = afpath", "def _update_adp_calculation(self, Temp):\n from sys import stdout\n\n self.printer('\\n ...calculating ADPs...\\n')\n\n import time\n\n start = time.time()\n\n daba_counter = 0.\n max_counter = float(len(self.keys()))\n for molecule in self.keys():\n daba_counter += 1.\n\n pstate = daba_counter / max_counter\n pstate = int(58 * pstate)\n bar = '[' + pstate * '#' + (58 - pstate) * '-' + ']'\n print ' | {}\\r'.format(bar),\n stdout.flush()\n\n try:\n self[molecule].get_adp(Temp)\n\n except KeyError:\n self.errorlog.write('Error: No ADP calculated by atom.get_adp() for {}.'.format(molecule))\n end = time.time()\n self.printer('\\n\\n Time used for ADP calculation: {:5.3f} sec on {} CPUs'.format(end - start, 1))", "def read_raw_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? 
SDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data[::2] + 1j*data[1::2]", "def read_adj(adj_file, read_stns, read_metadata):\n\n stns = {}\n msrs = {}\n\n version = None\n reference_frame = None\n file_name = None\n file_date = None\n epoch = None\n geoid_model = None\n soln_type = None\n run_time = None\n parameters = None\n msr_count = None\n outlier_count = None\n degrees_of_freedom = None\n chi_squared = None\n sigma_zero = None\n global_pelzer = None\n chi_square_lower = None\n chi_square_upper = None\n chi_square_result = None\n\n with open(adj_file, 'r') as adj_fh:\n switches = Switches()\n switches.header = True\n stn_line = None\n mandatory_coord_types = 'PLHh'\n\n msr_line = None\n msr_count = 0\n dir_set = 0\n msr_header_line = None\n\n for line_count, line in enumerate(adj_fh):\n\n if 'Adjusted Measurements' in line:\n msr_line = line_count + 5\n msr_header_line = line_count + 3\n three_line_count = 0\n\n if msr_line:\n if line_count == msr_line:\n switches.reset()\n switches.msrs = True\n\n if msr_header_line:\n if line_count == msr_header_line:\n tstat_switch, msr_id_switch = read_msr_fields(line)\n\n if read_stns:\n if 'Adjusted Coordinates' in line:\n stn_line = line_count + 5\n\n if stn_line:\n if line_count == stn_line - 2:\n desc_index = line.find('Description')\n\n if line_count == stn_line:\n switches.reset()\n switches.stns = True\n\n if switches.header:\n if read_metadata:\n version = common_fn.read_metadata(line, 'Version:', version)\n reference_frame = common_fn.read_metadata(line, 'Reference frame:', reference_frame)\n epoch = common_fn.read_epoch(line, epoch)\n geoid_model = common_fn.read_metadata(line, 'Geoid model:', geoid_model)\n\n # simple text reads (and more complex, but common, types) are done by function\n file_name = common_fn.read_metadata(line, 'File name:', file_name)\n file_date = common_fn.read_file_date(line, file_date)\n soln_type = common_fn.read_metadata(line, 'SOLUTION', soln_type)\n parameters = common_fn.read_metadata(line, 'Number of unknown parameters', parameters)\n degrees_of_freedom = common_fn.read_metadata(line, 'Degrees of freedom', degrees_of_freedom)\n chi_squared = common_fn.read_metadata(line, 'Chi squared', chi_squared)\n sigma_zero = common_fn.read_metadata(line, 'Rigorous Sigma Zero', sigma_zero)\n\n # more complex reads have their own if statements.\n if line[:35] == 'Total time ':\n items = line[35:].strip().split(':')\n hour = int(items[0])\n minute = int(items[1])\n second = float(items[2])\n run_time = datetime.timedelta(hours=hour, minutes=minute, seconds=second)\n\n if line[:35] == 'Number of measurements ':\n items = line[35:].split()\n if len(items) > 1:\n if '(' in items[1]:\n outliers = int(items[1].replace('(', ''))\n else:\n outliers = 0\n msr_count = int(items[0])\n outlier_count = outliers\n\n if line[:35] == 'Global (Pelzer) Reliability ':\n items = line[35:].strip().split()\n try:\n global_pelzer = 
float(items[0])\n except ValueError:\n global_pelzer = float('NaN')\n\n if line[:35] == 'Station coordinate types: ':\n coord_types = line[35:].strip()\n missing_coord_types = set()\n for l in mandatory_coord_types:\n if l not in coord_types:\n missing_coord_types.add(l)\n if missing_coord_types:\n raise ValueError(f'Mandatory coordinate types {missing_coord_types} '\n f'not present in {adj_file}')\n\n # last header category, write to metadata object\n if line[:35] == 'Chi-Square test (95.0%) ':\n items = line[35:].strip().split()\n chi_square_lower = float(items[0])\n chi_square_upper = float(items[4])\n chi_square_result = items[6].strip()\n\n metadata = DynaMetadata(epoch=epoch, reference_frame=reference_frame,\n geoid_model=geoid_model, version=version)\n adj_stats = DynaAdj.AdjStats(\n soln_type=soln_type,\n run_time=run_time,\n parameters=parameters,\n msr_count=msr_count,\n degrees_of_freedom=degrees_of_freedom,\n sigma_zero=sigma_zero,\n chi_squared=chi_squared,\n outlier_count=outlier_count,\n global_pelzer=global_pelzer,\n chi_square_lower=chi_square_lower,\n chi_square_upper=chi_square_upper,\n chi_square_result=chi_square_result\n )\n\n switches.reset()\n\n if switches.stns:\n if len(line) < 20:\n switches.reset()\n continue\n\n # store details as a Station object, leaving uncertainty fields null\n stn_object = read_coord_elements(line, coord_types, desc_index)\n stns[stn_object.name] = stn_object\n\n if switches.msrs:\n if line.strip() == '':\n switches.reset()\n continue\n\n msr_components = read_msr_line(line, tstat_switch, msr_id_switch)\n msr_type = msr_components['msr_type']\n\n # Direction sets - initialise new msr object\n if msr_type == 'D':\n msr_count += 1\n dir_set += 1\n d_stn1 = msr_components['stn1']\n d_stn2 = msr_components['stn2']\n d_msr_id = [msr_components['msr_id']]\n\n msr_object = DynaAdj.Measurement(\n msr_type=msr_type,\n stn1=d_stn1,\n stn2=d_stn2,\n stn3=[],\n ignore=[],\n cardinal=[],\n msr=[],\n adj=[],\n cor=[],\n msr_sd=[],\n adj_sd=[],\n cor_sd=[],\n nstat=[],\n tstat=[],\n pelzer=[],\n pre_adj_cor=[],\n outlier=[],\n msr_id=d_msr_id,\n cluster_id=[]\n )\n\n msrs[msr_count] = msr_object\n\n # Direction Set pointings - append msrs to lists\n elif msr_type == ' ':\n msrs[msr_count].stn3.append(msr_components['stn3'])\n msrs[msr_count].ignore.append(msr_components['ignore'])\n msrs[msr_count].msr.append(str_to_dms_angle(msr_components['msr']))\n msrs[msr_count].adj.append(str_to_dms_angle(msr_components['adj']))\n msrs[msr_count].cor.append(float(msr_components['cor']))\n msrs[msr_count].msr_sd.append(float(msr_components['msr_sd']))\n msrs[msr_count].adj_sd.append(float(msr_components['adj_sd']))\n msrs[msr_count].cor_sd.append(float(msr_components['cor_sd']))\n msrs[msr_count].nstat.append(float(msr_components['nstat']))\n if tstat_switch:\n msrs[msr_count].tstat.append(float(msr_components['tstat']))\n else:\n msrs[msr_count].tstat.append(None)\n msrs[msr_count].pelzer.append(float(msr_components['pelzer']))\n msrs[msr_count].pre_adj_cor.append(float(msr_components['pre_adj_cor']))\n msrs[msr_count].outlier.append(msr_components['outlier'])\n msrs[msr_count].msr_id.append(msr_components['msr_id'])\n msrs[msr_count].cluster_id.append(msr_components['cluster_id'])\n\n # Type G/X/Y measurements (split over 3 lines)\n elif msr_type in three_line_msrs:\n three_line_count += 1\n\n if three_line_count % 3 == 1:\n # re-initialise lists at new msr\n cardinal_list = []\n msr_list = []\n adj_list = []\n cor_list = []\n msr_sd_list = []\n adj_sd_list = []\n 
cor_sd_list = []\n nstat_list = []\n tstat_list = []\n pelzer_list = []\n pre_adj_cor_list = []\n outlier_list = []\n msr_id = msr_components['msr_id']\n cluster_id = msr_components['cluster_id']\n\n stn1 = msr_components['stn1']\n\n if msr_components['stn2'] != '':\n stn2 = msr_components['stn2']\n else:\n stn2 = None\n\n ignore = msr_components['ignore']\n\n cardinal_list.append(msr_components['cardinal'])\n msr_list.append(float(msr_components['msr']))\n adj_list.append(float(msr_components['adj']))\n cor_list.append(float(msr_components['cor']))\n msr_sd_list.append(float(msr_components['msr_sd']))\n adj_sd_list.append(float(msr_components['adj_sd']))\n cor_sd_list.append(float(msr_components['cor_sd']))\n nstat_list.append(float(msr_components['nstat']))\n if tstat_switch:\n tstat_list.append(float(msr_components['tstat']))\n else:\n tstat_list.append(None)\n pelzer_list.append(float(msr_components['pelzer']))\n pre_adj_cor_list.append(float(msr_components['pre_adj_cor']))\n outlier_list.append(msr_components['outlier'])\n\n if (three_line_count % 3) == 0:\n msr_count += 1\n\n msr_object = DynaAdj.Measurement(\n msr_type=msr_type,\n stn1=stn1,\n stn2=stn2,\n stn3=None,\n ignore=ignore,\n cardinal=cardinal_list,\n msr=msr_list,\n adj=adj_list,\n cor=cor_list,\n msr_sd=msr_sd_list,\n adj_sd=adj_sd_list,\n cor_sd=cor_sd_list,\n nstat=nstat_list,\n tstat=tstat_list,\n pelzer=pelzer_list,\n pre_adj_cor=pre_adj_cor_list,\n outlier=outlier_list,\n msr_id=msr_id,\n cluster_id=cluster_id\n )\n\n msrs[msr_count] = msr_object\n else:\n pass\n\n # all other msr types\n else:\n msr_count += 1\n if msr_type in angle_msrs:\n msr = str_to_dms_angle(msr_components['msr'])\n adj = str_to_dms_angle(msr_components['adj'])\n elif msr_type in hp_msrs:\n msr = gc.hp2dec(msr_components['msr'])\n adj = gc.hp2dec(msr_components['adj'])\n else:\n msr = float(msr_components['msr'])\n adj = float(msr_components['adj'])\n\n if tstat_switch:\n tstat = float(msr_components['tstat'])\n else:\n tstat = None\n\n msr_object = DynaAdj.Measurement(\n msr_type=msr_type,\n stn1=msr_components['stn1'],\n stn2=msr_components['stn2'],\n stn3=msr_components['stn3'],\n ignore=msr_components['ignore'],\n cardinal=msr_components['cardinal'],\n msr=msr,\n adj=adj,\n cor=float(msr_components['cor']),\n msr_sd=float(msr_components['msr_sd']),\n adj_sd=float(msr_components['adj_sd']),\n cor_sd=float(msr_components['cor_sd']),\n nstat=float(msr_components['nstat']),\n tstat=tstat,\n pelzer=float(msr_components['pelzer']),\n pre_adj_cor=float(msr_components['pre_adj_cor']),\n outlier=msr_components['outlier'],\n msr_id=msr_components['msr_id'],\n cluster_id=msr_components['cluster_id']\n )\n\n msrs[msr_count] = msr_object\n\n return stns, msrs, metadata, file_name, file_date, adj_stats", "def eam_alloy_to_adp(alloy: EAMAlloy) -> ADP:\n # Load parameter file\n if not isinstance(alloy, EAMAlloy):\n alloy = EAMAlloy(alloy)\n \n # Initialize fs object\n adp = ADP()\n \n # Copy over header\n adp.header = alloy.header\n\n # Copy over r\n adp.set_r(num=alloy.numr, cutoff=alloy.cutoffr, delta=alloy.deltar)\n\n # Copy over rho\n adp.set_rho(num=alloy.numrho, cutoff=alloy.cutoffrho, delta=alloy.deltarho)\n \n for i, symbol in enumerate(alloy.symbols):\n \n # Copy over symbol info\n adp.set_symbol_info(**alloy.symbol_info(symbol))\n \n for i, symbol in enumerate(alloy.symbols):\n \n # Copy over F(rho)\n adp.set_F_rho(symbol, table=alloy.F_rho(symbol))\n \n # Copy over rho(r)\n adp.set_rho_r(symbol, table=alloy.rho_r(symbol))\n \n # Copy over 
r*phi(r)\n for symbol2 in alloy.symbols[:i+1]:\n symbolpair = [symbol, symbol2]\n adp.set_rphi_r(symbolpair, table=alloy.rphi_r(symbolpair))\n \n # Set u(r) and w(r) to all zeros\n adp.set_u_r(symbolpair, table=np.zeros_like(adp.r))\n adp.set_w_r(symbolpair, table=np.zeros_like(adp.r))\n \n return adp", "def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda", "def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()", "def read_dip(fname, verbose=None):\n dipole = read_dipole(fname)\n return (dipole.times * 1000., dipole.pos, dipole.amplitude,\n 1e9 * dipole.ori * dipole.amplitude[:, np.newaxis], dipole.gof)", "def read_formatted_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? FDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data", "def average_dn_ds(self):\n results_exist = []\n self.dNdS = {}\n self.dN = {}\n self.dS = {}\n \n ## check what outputs there are\n for alg in ['clus', 'mus', 'coff']:\n if os.path.isfile(self.paml_output(alg)):\n results_exist.append(alg)\n self.dNdS['average'] = 'undetermined'\n \n ## for each of those results, grab the dNdS table\n tots = 0\n maxDS = []\n for algnr in results_exist:\n ## find the BEB sites from the paml results file\n with open(self.paml_output(algnr), 'r') as f:\n capture = False\n self.dNdS[algnr] = {'maxDS' : [], 'avDS' : [], 'maxDN' : [], 'avDN' : [], 'maxDNDS' : [], 'avDNDS' : []}\n for line in f.readlines():\n if 'Use runmode = -2 for ML pairwise comparison.)' in line:\n capture = True\n \n elif 'Model 7: beta (10 categories)' in line:\n capture = False\n \n if capture:\n if len(line) > 0 and \"Use runmode\" not in line:\n line = line.split() # makes this into a list of all items\n for ele in line:\n if '(' in ele and len(ele) > 0:\n dn = float(ele.split('(')[1])\n self.dNdS[algnr]['avDN'].append(dn)\n if ')' in ele and len(ele) > 0:\n ds = float(ele.split(')')[0])\n self.dNdS[algnr]['avDS'].append(ds)\n if '(' not in ele and ')' not in ele and not any(c.isalpha() for c in ele) and '-2' not in str(ele) and '=' not in str(ele):\n ## this next bit is to take care of an error in PAML where \n ## dN values = 0 caused the pairwise dN/dS to evaluate as \n ## -1\n if '-1' in ele:\n ele = '0'\n self.dNdS[algnr]['avDNDS'].append(float(ele))\n\n \n if debug:\n print('pairwise dnds vales for ' + algnr + ': ' + str(self.dNdS[algnr]))\n\n ## get the average dNdS for each aligner that was run\n if len(self.dNdS[algnr]['avDNDS']) > 0:\n ## get the max 
value for each\n self.dNdS[algnr]['maxDNDS'] = str(max(self.dNdS[algnr]['avDNDS']))\n th = 0\n for x in self.dNdS[algnr]['avDNDS']:\n th += float(x)\n \n self.dNdS[algnr]['avDNDS'] = str(th / len(self.dNdS[algnr]['avDNDS'])) \n else:\n self.dNdS[algnr]['avDNDS'] = 'NA'\n \n ## for dS\n if len(self.dNdS[algnr]['avDS']) > 0:\n self.dNdS[algnr]['maxDS'] = max(self.dNdS[algnr]['avDS'])\n else:\n self.dNdS[algnr]['maxDS'] = 'NA'\n\n if debug:\n print('Average dNdS for ' + algnr + ': ' + str(self.dNdS[algnr]['avDNDS']))\n\n ## add to the total\n if self.dNdS[algnr]['avDNDS'] != 'NA':\n tots += float(self.dNdS[algnr]['avDNDS'])\n if self.dNdS[algnr]['maxDS'] != 'NA':\n maxDS.append(self.dNdS[algnr]['maxDS'])\n \n ## remove anything that equals NA\n temp_list = []\n for algnr in results_exist:\n if self.dNdS[algnr]['avDNDS'] != 'NA':\n temp_list.append(algnr)\n results_exist = temp_list\n\n ## if none exist, set the output to NA\n if len(results_exist) < 1:\n self.dNdS['average'] = 'NA'\n self.dNdS['maxDS'] = 'NA'\n\n\n ## after, average all the aligners together\n if self.dNdS['average'] != 'NA':\n self.dNdS['average'] = tots / len(results_exist)\n self.dNdS['maxDS'] = max(maxDS)\n if debug:\n print('made it through dNdS')", "def getRes(ACX, sampRat, d):\n t = 1 / float(sampRat)\n\n Ta = np.linspace(0.04, 6, 597)\n\n dataLen = len(ACX)\n pLen = len(Ta)\n\n ACmax = np.zeros(pLen)\n VEmax = np.zeros(pLen)\n DImax = np.zeros(pLen)\n\n DI = np.zeros(dataLen)\n VE = np.zeros(dataLen)\n AC = np.zeros(dataLen)\n\n for j, T1 in enumerate(Ta):\n w0 = 2 * np.pi / T1\n U1 = (1 / (w0 * np.sqrt(1 - d ** 2))) * np.sin((w0 * np.sqrt(1 - d ** 2)) * t)\n U2 = np.cos(w0 * np.sqrt(1 - d ** 2) * t)\n v = np.exp(-d * w0 * t)\n r1 = (2 * (d ** 2) - 1) / (w0 ** 2 * t)\n r2 = 2 * d / (w0 ** 3 * t)\n r3 = d / (w0 ** 2 * t)\n r4 = d / w0\n k1 = w0 ** 2 * (1 - d ** 2) * U1 + d * w0 * U2\n k2 = r2 + r3 * t / d\n k3 = U2 - d * w0 * U1\n k4 = r1 + r4\n a1 = v * (d * w0 * U1 + U2)\n a2 = v * U1\n a3 = -w0 ** 2 * (v * U1)\n a4 = v * k3\n b1 = v * (U1 * k4 + U2 * k2) - r2\n b2 = -v * (U1 * r1 + U2 * r2) - t * r3 / d + r2\n b3 = v * (k3 * k4 - k1 * k2) + r3 / d\n b4 = -v * (r1 * k3 - r2 * k1) - r3 / d\n DI[0] = 0\n VE[0] = -ACX[0] * t\n AC[0] = 2 * d * w0 * ACX[0] * t\n for i in range(1, dataLen - 1):\n DI[i + 1] = a1 * DI[i] + a2 * VE[i] + b1 * ACX[i] + b2 * ACX[i + 1]\n VE[i + 1] = a3 * DI[i] + a4 * VE[i] + b3 * ACX[i] + b4 * ACX[i + 1]\n AC[i + 1] = -2 * d * w0 * VE[i + 1] - w0 ** 2 * DI[i + 1]\n\n ACmax[j] = np.max(np.abs(AC))\n VEmax[j] = np.max(np.abs(VE))\n DImax[j] = np.max(np.abs(DI))\n\n return ACmax, VEmax, DImax, Ta", "def read_apr(self, lexclude=[], discontinuity=None, rename=None, verbose=False):\n###############################################################################\n \n import pyacs.lib.astrotime\n from pyacs.sol.gpoint import Gpoint\n\n # DEAL WITH RENAME IF PROVIDED\n \n if rename is not None:\n \n if verbose:print(\"-- Rename info provided for apr file: \", self.name)\n\n H_rename = {}\n\n # Case for a CODE rename applying for all SINEX files\n if 'all' in rename:\n \n for (code, new_code) in rename['all']:\n H_rename[code] = new_code\n \n # Case for a CODE rename applying for the current SINEX\n \n if self.name in list(rename.keys()):\n\n for (code, new_code) in rename[self.name]:\n H_rename[code] = new_code\n \n # READING APR FILE\n \n if verbose:\n print('-- Reading Globk apr file ', self.name)\n\n try:\n APR_VALUE = np.genfromtxt(self.name, comments='#', usecols=(1,2,3,4,5,6,7,8,9,10,11,12,12))\n 
APR_NAME = np.genfromtxt(self.name, comments='#', usecols=(0), dtype=str)\n except:\n print('!!!ERROR: could not read Globk format apr file:' , self.name)\n import sys\n sys.exit()\n \n for i in np.arange( APR_VALUE.shape[0]) :\n print('-- processing ', APR_NAME[i][:4])\n [x,y,z,sx,sy,sz,epoch, vx,vy,vz,svx,svy,svz]= APR_VALUE[i,:]\n M=Gpoint(X=x,Y=y,Z=z,\\\n SX=sx,SY=sy,SZ=sz,\\\n VX=vx,VY=vy,VZ=vz,SVX=svx,SVY=svy,SVZ=svz, \\\n epoch=epoch,code=APR_NAME[i][:4],pt='A',soln=1)\n \n self.estimates[ APR_NAME[i][:4], 1 ] = M", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read(self):\n\n # create empty DAC data array\n\n dacData = numarray.zeros((calConstant.NUM_TEM, calConstant.NUM_ROW, calConstant.NUM_END,\r\n calConstant.NUM_FE), numarray.UInt8)\n\n # find <fle_dac> elements\n\n doc = self.getDoc()\n\n latList = doc.xpath('.//LATC_XML')\n latLen = len(latList)\n if latLen != 1:\n raise calFileReadExcept, \"found %d <LATC_XML> elements, expected 1\" % latLen\n temList = latList[0].xpath('.//TEM')\n temLen = len(temList)\n if temLen > 16:\n raise calFileReadExcept, \"found %d <TEM> elements, expected <= 16\" % temLen\n for t in temList:\n tem = int(t.getAttributeNS(None, 'ID'))\n if tem < 0 or tem > 16:\n raise calFileReadExcept, \"<TEM> ID attribute value %d, expected (0 - 15)\" % tem\n cccList = t.xpath('.//CCC')\n cccLen = len(cccList)\n if cccLen > 4:\n raise calFileReadExcept, \"found %d <CCC> elements, expected <= 4\" % cccLen\n for c in cccList:\n ccc = int(c.getAttributeNS(None, 'ID'))\n if ccc < 0 or ccc > 3:\n raise calFileReadExcept, \"<CCC> ID attribute value %d, expected (0 - 3)\" % ccc\n rcList = c.xpath('.//CRC')\n rcLen = len(rcList)\n if rcLen > 4:\n raise calFileReadExcept, \"found %d <CRC> elements, expected <= 4\" % rcLen\n for r in rcList:\n rc = int(r.getAttributeNS(None, 'ID'))\n if rc < 0 or rc > 3:\n raise calFileReadExcept, \"<CRC> ID attribute value %d, expected (0 - 3)\" % rc\n feList = r.xpath('.//CFE')\n feLen = len(feList)\n if feLen > 12:\n raise calFileReadExcept, 
\"found %d <CFE> elements, expected <= 12\" % feLen\n for f in feList:\n fe = int(f.getAttributeNS(None, 'ID'))\n if fe < 0 or fe > 11:\n raise calFileReadExcept, \"<CFE> ID attribute value %d, expected (0 - 11)\" % fe\n dacList = f.xpath('.//%s' % self.__dacName)\n dacLen = len(dacList)\n if dacLen != 1:\n if dacLen == 0:\n continue\n else:\n raise calFileReadExcept, \"found %d %s elements, expected 1\" % (dacLen, self.__dacName)\n d = dacList[0]\n dd = d.childNodes[0]\n dac = int(dd.data.strip(), 16)\n (row, end) = ccToRow(ccc, rc)\n dacData[tem, row, end, fe] = dac\n\n return dacData", "def get_all_DLP_measurements(self):\n pass", "def explore_FAAM_aerosol_data():\n # -- PCASP\n dsPCASP = get_FAAM_mineral_dust_calibration(instrument='PCASP',\n rtn_values=False)\n # -- CDP\n dsCDP = get_FAAM_mineral_dust_calibration(instrument='CDP',\n rtn_values=False)\n # only consider \"potential dust\" above a certain size?\n # Use 100 um for now", "def get_DP(vcffile):\n print(\"Start read vcf file...\")\n try:\n VariantFile\n except NameError:\n from pysam import VariantFile\n\n vcf_in = VariantFile(vcffile)\n DP_list = []\n\n for rec in vcf_in.fetch():\n DP_list.append(rec.info[\"DP\"])\n\n return DP_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the ADP after reflection on the plane defined by its normal vector 'planev'.
def reflect_adp(adp, planev):
    M = np.identity(4)
    M[:3, :3] -= 2.0 * np.outer(planev, planev)
    M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev
    return rotate_adp(adp, M[:3, :3])
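
A minimal sketch of the reflection this function builds, assuming rotate_adp (not shown in this row) applies the matrix to the displacement tensor as R U R-transpose and that planev is already a unit normal; numpy is assumed to be imported as np, the numeric values are made up, and the six components follow the (U11, U22, U33, U12, U13, U23) ordering used by get_matrix in the surrounding snippets:

import numpy as np

planev = np.array([0.0, 0.0, 1.0])                   # unit normal of the mirror plane (assumed normalized)
R = np.identity(3) - 2.0 * np.outer(planev, planev)  # same Householder matrix as M[:3, :3] above

adp = [0.02, 0.03, 0.04, 0.005, 0.001, 0.002]        # illustrative U11, U22, U33, U12, U13, U23
U = np.array([[adp[0], adp[3], adp[4]],
              [adp[3], adp[1], adp[5]],
              [adp[4], adp[5], adp[2]]])

U_mirror = R.dot(U).dot(R.T)                         # reflection in the xy plane flips the sign of U13 and U23
print(np.round(U_mirror, 6))
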
[ "def GetPlane(plane):\r\n pass", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M", "def test_antinormal_reflection(self):\n n1 = 1.0\n n2 = 1.5\n normal = (0.0, 0.0, -1.0)\n angle = 0.0\n ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)\n fresnel = FresnelReflection()\n assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)\n new_ray = fresnel.transform(ray, {\"normal\": normal})\n assert np.allclose(flip(ray.direction), new_ray.direction)", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def plane(self):\n return self._plane", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def normal(plane):\n return plane[:3].copy()", "def Plane(self, *args):\n return _GeomPlate.GeomPlate_BuildAveragePlane_Plane(self, *args)", "def getDistanceFromPlane(self, point):\n d = math.fabs((self.planeEquation[0] * point[0] + self.planeEquation[1] * point[1] + self.planeEquation[2] *\n point[2] + self.planeEquation[3]))\n e = math.sqrt(self.planeEquation[0] ** 2 + self.planeEquation[1] ** 2 + self.planeEquation[2] ** 2)\n distance = d/e\n return distance", "def alpha_n(Vm): \n return (0.01 * (10.0 - Vm)) / (np.exp(1.0 - (0.1 * Vm)) - 1.0)", "def invert_normal(plane):\n # flip the normal, and the distance\n return -plane", "def plane(self):\n return plane(self.N, self.o)", "def defautPlaneite(alpha,nouveauPoint):\n ecart=[]\n for point in nouveauPoint:\n ecart.append(point[2]-(alpha[0]+alpha[1]*point[1]-alpha[2]*point[0]))\n defaut=max(ecart)-min(ecart)\n return defaut", "def get_real_pwv(pwv, altitude):\n zenith_angle = 90-altitude\n airmass = 1/np.cos(zenith_angle*np.pi/180)\n return pwv*airmass", "def _reflectedDirection(velocity, reflect_point, reflect_origin, outside=True):\n # Here we do reflection based on input velocity direction relative to the\n # tangent of the intersection point with the \"safety\" circle. 
If \"outside\"\n # parameter is true, then the reflected direction will be left of the\n # tangent, otherwise reflection will be right of tangent\n vel_ang = np.arctan2(velocity[1], velocity[0])\n tan_ang = _angleWrap(\n np.arctan2(reflect_point[1] - reflect_origin[1], reflect_point[0] -\n reflect_origin[0]) + np.pi / 2)\n direction = -1 if outside else 1\n return _angleWrap(tan_ang +\n direction * np.abs(_angleWrap(vel_ang - tan_ang)))", "def reflect_line_plane(line, plane, epsilon=1e-6):\n intx_pt = intersection_line_plane(line, plane, epsilon)\n if not intx_pt:\n return None\n vec_line = subtract_vectors(line[1], line[0])\n vec_reflect = mirror_vector_vector(vec_line, plane[1])\n if angle_smallest_vectors(plane[1], vec_reflect) > 0.5 * pi:\n return None\n return [intx_pt, add_vectors(intx_pt, vec_reflect)]", "def calc_vertical_angle(our_lat, our_lon, our_alt, plane_lat, plane_lon, plane_alt):\n\n d_alt = plane_alt - our_alt\n distance = calc_distance(our_lat, our_lon, plane_lat, plane_lon)\n return math.degrees(math.atan(d_alt/distance))", "def reflectivity(self, surface, ray, geometry, container, adjacent):\n # Get the surface normal to determine which surface has been hit.\n normal = geometry.normal(ray.position)\n \n # Normal are outward facing\n TOP_SURFACE = (0, 0, 1)\n \n # If a ray hits the top surface where x > 0 and y > 0 reflection\n # set the reflectivity to 1.\n if np.allclose(normal, TOP_SURFACE):\n x, y = ray.position[0], ray.position[1]\n if x > 0 and y > 0:\n return 1.0\n \n # Otherwise return the Frensel reflection probability.\n return super(PartialTopSurfaceMirror, self).reflectivity(surface, ray, geometry, container, adjacent) # opt-out of handling custom reflection", "def on_plane(self, plane):\n v = plane.point - self\n return 0 == v.mag2 or plane.normal.isperpendicular(v)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates an ADP in its matrix representation from the three principal axes representing the displacement ellipsoid. The three principal axes of the ellipsoid are needed as arguments. A matrix representation of the ADP is returned.
def get_adp_from_calc(vx, vy, vz):
    ## lx=np.linalg.norm(vx)
    ## ly=np.linalg.norm(vy)
    ## lz=np.linalg.norm(vz)
    lx = vx
    ly = vy
    lz = vz
    L = np.matrix([[lx, 0, 0],
                   [0, ly, 0],
                   [0, 0, lz]])
    ## Vx=vx/lx
    ## Vy=vy/ly
    ## Vz=vz/lz
    Vx = np.array([1, 0, 0])
    Vy = np.array([0, 1, 0])
    Vz = np.array([0, 0, 1])
    V = np.matrix([[Vx[0], Vy[0], Vz[0]],
                   [Vx[1], Vy[1], Vz[1]],
                   [Vx[2], Vy[2], Vz[2]]])
    Vinv = np.linalg.inv(V)
    #print V,Vinv
    M = np.dot(np.dot(Vinv, L), V)
    #print M
    return M
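
A small sanity check, assuming the function above is defined in the current scope and numpy is imported as np: because Vx, Vy and Vz are hard-coded to the Cartesian axes, the returned matrix is simply the diagonal matrix of the three principal values passed in.

import numpy as np

M = get_adp_from_calc(0.02, 0.03, 0.04)
print(np.allclose(M, np.diag([0.02, 0.03, 0.04])))   # True
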
[ "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def matrix_exp_pade3(matrix, multiplication_rule=None):\n b = [120.0, 60.0, 12.0]\n b = [tf.constant(x, matrix.dtype) for x in b]\n ident = tf.linalg.eye(\n tf.shape(matrix)[-2],\n batch_shape=tf.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = tf.linalg.matmul(matrix, matrix)\n tmp = matrix_2 + b[1] * ident\n matrix_u = tf.linalg.matmul(matrix, tmp)\n matrix_v = b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v", "def get_affine(self):\n # AFNI default is RAI- == LPS+ == DICOM order. 
We need to flip RA sign\n # to align with nibabel RAS+ system\n affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4)\n affine = np.row_stack((affine * [[-1], [-1], [1]],\n [0, 0, 0, 1]))\n return affine", "def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d", "def spherical_pentagon_area(pointa,pointb,pointc,pointd,pointe):\n\n\n #print(angle_A)\n\n norm = np.cross(pointa,pointb)\n norm_ab = norm/np.linalg.norm(norm)\n norm = np.cross(pointb,pointc)\n norm_bc = norm/np.linalg.norm(norm)\n norm = np.cross(pointc,pointd)\n norm_cd = norm/np.linalg.norm(norm)\n norm = np.cross(pointd,pointe)\n norm_de = norm/np.linalg.norm(norm)\n norm = np.cross(pointe,pointa)\n norm_ea = norm/np.linalg.norm(norm)\n\n mat_a = np.asarray([pointe,pointa,pointb])\n mat_b = np.asarray([pointa,pointb,pointc])\n mat_c = np.asarray([pointb,pointc,pointd])\n mat_d = np.asarray([pointc,pointd,pointe])\n mat_e = np.asarray([pointd,pointe,pointa])\n print(np.sign(np.linalg.det(mat_a)))\n print(np.sign(np.linalg.det(mat_b)))\n print(np.sign(np.linalg.det(mat_c)))\n print(np.sign(np.linalg.det(mat_d)))\n print(np.sign(np.linalg.det(mat_e)))\n angle_A = np.sign(np.linalg.det(mat_a))*np.arccos(-np.dot(norm_ea,norm_ab))\n angle_B = np.sign(np.linalg.det(mat_b))*np.arccos(-np.dot(norm_ab,norm_bc))\n angle_C = np.sign(np.linalg.det(mat_c))*np.arccos(-np.dot(norm_bc,norm_cd))\n angle_D = np.sign(np.linalg.det(mat_d))*np.arccos(-np.dot(norm_cd,norm_de))\n angle_E = np.sign(np.linalg.det(mat_e))*np.arccos(-np.dot(norm_de,norm_ea))\n #print(angle_A)\n #print(angle_B)\n #print(angle_C)\n #print(angle_D)\n #print(angle_E)\n #print()\n\n #angle_A = np.arcsin(np.linalg.norm(\n # np.cross(np.cross(pointa,pointb),np.cross(pointa,pointe)))/(\n # np.linalg.norm(np.cross(pointa,pointb))*\n # np.linalg.norm(np.cross(pointa,pointe))))\n #angle_B = np.arcsin(np.linalg.norm(\n # np.cross(np.cross(pointb,pointc),np.cross(pointb,pointa)))/(\n # np.linalg.norm(np.cross(pointb,pointc))*\n # np.linalg.norm(np.cross(pointb,pointa))))\n #angle_C = np.arcsin(np.linalg.norm(\n # np.cross(np.cross(pointc,pointd),np.cross(pointc,pointb)))/(\n # np.linalg.norm(np.cross(pointc,pointd))*\n # np.linalg.norm(np.cross(pointc,pointb))))\n #angle_D = np.arcsin(np.linalg.norm(\n # np.cross(np.cross(pointd,pointe),np.cross(pointd,pointc)))/(\n # np.linalg.norm(np.cross(pointd,pointe))*\n # np.linalg.norm(np.cross(pointd,pointc))))\n #angle_E = np.arcsin(np.linalg.norm(\n # np.cross(np.cross(pointe,pointa),np.cross(pointe,pointd)))/(\n # np.linalg.norm(np.cross(pointe,pointa))*\n # np.linalg.norm(np.cross(pointe,pointd))))\n\n #print(angle_A)\n #print(angle_B)\n #print(angle_C)\n #print(angle_D)\n #print(angle_E)\n #print(angle_A + angle_B + angle_C + angle_D + angle_E)\n #print(3*np.pi)\n #quit()\n area = angle_A + angle_B + angle_C + angle_D + angle_E - 3*np.pi\n print(area)\n\n return area", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + 
np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def v_eta_operator(self):\n I = np.identity(self.N)\n i = (0+1j)\n delta = self.D[1] - self.alpha**2 * I\n Z = np.zeros((self.N, self.N))\n\n CD = np.matrix(np.diag(self.aCD))\n dCD = np.matrix(np.diag(self.daCD))\n U = np.matrix(np.diag(self.U))\n D1 = np.matrix(self.D[0])\n D2 = np.matrix(self.D[1])\n D4 = np.matrix(self.D[3])\n\n dU = np.matrix(np.diag(self.dU))\n ddU = np.matrix(np.diag(self.ddU))\n\n if self.option['equation'] == 'Euler':\n self.A = U * delta - np.diag(self.ddU)\n self.B = delta\n elif self.option['equation'] == 'Euler_CD':\n self.A = U * delta - np.diag(self.ddU)\\\n - (i/self.alpha) * (dCD*U*D1 + CD*dU*D1 + CD*U*D2)\n self.B = delta\n elif self.option['equation'] == 'Euler_CD_turb':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'LNS':\n self.A = (i/(self.alpha*self.Re)) *\\\n (D4 +I*self.alpha**4 -2*self.alpha**2 *D2)\\\n - ddU + U*delta\n self.B = delta\n elif self.option['equation'] == 'LNS_CD':\n self.A = (i/(self.alpha*self.Re)) *\\\n (delta)**2\\\n - ddU + U*delta - (i/self.alpha) *\\\n (dCD*U*D1 + CD*dU*D1 + CD*U*D2)\n self.B = delta\n elif self.option['equation'] == 'LNS_turb':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'LNS_turb_CD':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'Euler_wave':\n # in this case the B.C. 
is of 2nd order in omega so the matrix\n # problem should be reorganized see the article of Jerome\n # Hoepffner for details in the trick to transform polynomial\n # eigenvalue problem in a single one\n self.A = np.dot(np.diag(self.U), delta) - np.diag(self.ddU)\n self.B = delta\n self.C = Z\n\n A1 = np.concatenate((self.A, Z), axis=1)\n A2 = np.concatenate((Z, I), axis=1)\n self.A = np.concatenate((A1, A2))\n\n B1 = np.concatenate((self.B, self.C), axis=1)\n B2 = np.concatenate((I, Z), axis=1)\n self.B = np.concatenate((B1, B2))\n\n self.A_noBC = np.copy(self.A)\n self.B_noBC = np.copy(self.B)\n #self.A_noBC = self.A\n #self.B_noBC = self.B\n\n if self.option['equation'] == 'Euler':\n self.BC2()\n elif self.option['equation'] == 'Euler_CD':\n self.BC2()\n elif self.option['equation'] == 'Euler_CD_turb':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'LNS':\n self.BC1()\n elif self.option['equation'] == 'LNS_CD':\n self.BC1()\n elif self.option['equation'] == 'LNS_turb':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'LNS_turb_CD':\n print (\"not implemented yet\")\n elif self.option['equation'] == 'Euler_wave':\n self.BC_wave_v_eta()", "def get_matrix(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n return adp", "def dotProduct(self, *args):\n return _almathswig.Position3D_dotProduct(self, *args)", "def principal_axis(alpha_carbons):\n # alpha carbons coordinates as a numpy array\n coord = numpy.array(alpha_carbons, float)\n\n # get geometrical center\n center = numpy.mean(coord, 0)\n coord = coord - center\n\n # create inertia matrix and extract eigenvectors and values\n inertia = numpy.dot(coord.transpose(), coord)\n e_values, e_vectors = numpy.linalg.eig(inertia)\n\n # sort eigenvalues\n order = numpy.argsort(e_values)\n\n # axis1 is the principal axis with the greatest eigenvalue\n _, _, axis1 = e_vectors[:, order].transpose()\n\n axis_direction = axis1 / numpy.linalg.norm(axis1)\n\n return center, axis_direction", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.hc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.Tcol[self.layers]\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.T[\n self.layers] # Pretty cool that this works, really\n return mat, rhs", "def getEulerAngles(self, axes='sxyz'):\n\n M = self.getMatrix()\n\n try:\n firstaxis, parity, repetition, frame = AXES_TO_TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n TUPLE_TO_AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = NEXT_AXIS[i+parity]\n k = NEXT_AXIS[i-parity+1]\n\n a = numpy.empty((3, ))\n\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > EPS:\n a[0] = math.atan2( M[i, j], M[i, k])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = math.atan2( M[j, i], -M[k, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2( sy, 
M[i, i])\n a[2] = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > EPS:\n a[0] = math.atan2( M[k, j], M[k, k])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = math.atan2( M[j, i], M[i, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = 0.0\n\n if parity:\n a[0], a[1], a[2] = -a[0], -a[1], -a[2]\n if frame:\n a[0], a[2] = a[2], a[0]\n return a", "def projectionMatrix(v):\n\tv = correctType(v)\n\treturn np.outer(v,v)", "def adjugate_matrix(self, determinant, transposed_cofactor):\n if transposed_cofactor.__class__.__name__ != \"Matrix3\":\n raise TypeError(self._ERRORS[0])\n\n r1 = transposed_cofactor.row_1\n r2 = transposed_cofactor.row_2\n r3 = transposed_cofactor.row_3\n\n r1[0] /= determinant\n r1[1] /= determinant\n r1[2] /= determinant\n\n r2[0] /= determinant\n r2[1] /= determinant\n r2[2] /= determinant\n\n r3[0] /= determinant\n r3[1] /= determinant\n r3[2] /= determinant\n\n return Matrix3(r1, r2, r3)", "def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines the quaternion representing the best possible transformation of two coordinate systems into each other using a least-squares approach. This function is used by the get_refined_rotation() function.
def get_best_quaternion(coordlist1, coordlist2):
    M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    if len(coordlist1) <= len(coordlist2):
        number = len(coordlist1)
    else:
        number = len(coordlist2)
    for i in xrange(number):
        aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))
        M = M + aaa
    N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])
    N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])
    N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])
    N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])
    N12 = float(M[1][:, 2] - M[2][:, 1])
    N13 = float(M[2][:, 0] - M[0][:, 2])
    N14 = float(M[0][:, 1] - M[1][:, 0])
    N21 = float(N12)
    N23 = float(M[0][:, 1] + M[1][:, 0])
    N24 = float(M[2][:, 0] + M[0][:, 2])
    N31 = float(N13)
    N32 = float(N23)
    N34 = float(M[1][:, 2] + M[2][:, 1])
    N41 = float(N14)
    N42 = float(N24)
    N43 = float(N34)
    N = np.matrix([[N11, N12, N13, N14],
                   [N21, N22, N23, N24],
                   [N31, N32, N33, N34],
                   [N41, N42, N43, N44]])
    values, vectors = np.linalg.eig(N)
    w = list(values)
    quat = vectors[:, w.index(max(w))]
    quat = np.array(quat).reshape(-1, ).tolist()
    return quat, max(w)
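
A minimal usage sketch, assuming get_best_quaternion is defined in the same namespace and numpy is imported as np; the function is written for Python 2, so xrange is aliased first. Three reference points are rotated by 90 degrees about z, and the quaternion of that rotation should be recovered up to an overall sign:

import numpy as np

xrange = range   # assumption: running under Python 3

pts1 = [np.array([1.0, 0.0, 0.0]),
        np.array([0.0, 1.0, 0.0]),
        np.array([0.0, 0.0, 1.0])]
Rz = np.array([[0.0, -1.0, 0.0],
               [1.0, 0.0, 0.0],
               [0.0, 0.0, 1.0]])
pts2 = [Rz.dot(p) for p in pts1]

quat, overlap = get_best_quaternion(pts1, pts2)
print(np.round(quat, 3))   # ~ [0.707, 0, 0, 0.707] (or its negative), scalar-first
print(round(overlap, 3))   # 3.0, the maximised sum of dot products
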
[ "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)", "def get_optimal_rotation_and_translation(x1, x2):\n t = get_optimal_translation(x1, x2)\n x1 = x1 - jnp.mean(x1, axis=0)\n x2 = x2 - jnp.mean(x2, axis=0)\n return get_optimal_rotation(x1, x2), t", "def rotation_only(q_1: Q, h: Q) -> Q:\n h_4_rotation = vector_q(h)\n return rotation_and_or_boost(q_1, h_4_rotation)", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def shortRot(v1,v2):\n v1 /= np.sqrt(np.dot(v1,v1))\n v2 /= np.sqrt(np.dot(v2,v2))\n if np.sum(v1-v2) < 1.e-6: return quaternion()\n c = np.cross(v1,v2)\n f = dot(v1,v2)\n s = np.sqrt((1.0+d)*2.0)\n q = quaternion(c/s)\n q[0] = s / 2.0;\n return q", "def setup_s_matrix(dq_1, dq_2):\n scalar_parts_1 = dq_1.scalar()\n scalar_parts_2 = dq_2.scalar()\n\n assert np.allclose(\n scalar_parts_1.dq, scalar_parts_2.dq,\n atol=5e-2), (\n \"\\ndq1:\\n{},\\nscalar_parts_1:\\n{},\\ndq2:\\n{},\\nscalar_parts_2:\\n{}\\n\"\n \"Scalar parts should always be equal.\".format(dq_1, scalar_parts_1, dq_2,\n scalar_parts_2))\n\n s_matrix = np.zeros([6, 8])\n s_matrix[0:3, 0:3] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[0:3, 3] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n s_matrix[3:6, 0:3] = skew_from_vector(dq_1.q_dual.q[0:-1] +\n dq_2.q_dual.q[0:-1])\n s_matrix[3:6, 3] = dq_1.q_dual.q[0:-1] - dq_2.q_dual.q[0:-1]\n s_matrix[3:6, 4:7] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[3:6, 7] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n # print(\"S: \\n{}\".format(s_matrix))\n\n rank_s_matrix = np.linalg.matrix_rank(s_matrix)\n assert rank_s_matrix <= 6, s_matrix\n return s_matrix.copy()", "def addquaternion(q1, q2):\n assert numpy.allclose(math.sqrt(numpy.dot(q1,q1)), 1.0)\n assert numpy.allclose(math.sqrt(numpy.dot(q2,q2)), 1.0)\n \n x1, y1, z1, w1 = q1\n x2, y2, z2, w2 = q2\n\n x = w1*x2 + x1*w2 + y1*z2 - z1*y2\n y = w1*y2 + y1*w2 + z1*x2 - x1*z2\n z = w1*z2 + z1*w2 + x1*y2 - y1*x2\n w = w1*w2 - x1*x2 - y1*y2 - z1*z2\n q = numpy.array((x, y, z, w), float)\n \n ## normalize quaternion\n q = q / 
math.sqrt(numpy.dot(q,q))\n assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)\n \n return q", "def quat_diff(q1, q2):\n q1 = np.asarray(q1)\n if np.dot(q1, q2) < 0:\n # Quaternions have opposite handedness, flip q1 since it's already an ndarray\n q1 = -1 * q1\n q_inv = q1 * np.array([1.0, -1.0, -1.0, -1.0])\n q_inv = q_inv / np.dot(q_inv, q_inv)\n\n # We only coare about the scalar component, compose only that\n z0 = q_inv[0] * q2[0] - q_inv[1] * q2[1] - q_inv[2] * q2[2] - q_inv[3] * q2[3]\n return 2 * float(np.arccos(min(1, max(-1, z0))))", "def _pick_closest_quaternion(quaternion, target_quaternion):\n if (np.linalg.norm(-quaternion - target_quaternion) <\n np.linalg.norm(quaternion - target_quaternion)):\n return -quaternion\n return quaternion", "def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R", "def is_same_quaternion(q0, q1):\r\n q0 = numpy.array(q0)\r\n q1 = numpy.array(q1)\r\n return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)", "def quaternion_product(q1, q2):\r\n Wa = q1[0]\r\n Wb = q2[0]\r\n Xa = q1[1]\r\n Xb = q2[1]\r\n Ya = q1[2]\r\n Yb = q2[2]\r\n Za = q1[3]\r\n Zb = q2[3]\r\n x = Xa * Wb + Ya * Zb - Za * Yb + Wa * Xb\r\n y = -Xa * Zb + Ya * Wb + Za * Xb + Wa * Yb\r\n z = Xa * Yb - Ya * Xb + Za * Wb + Wa * Zb\r\n w = -Xa * Xb - Ya * Yb - Za * Zb + Wa * Wb\r\n return [w, x, y, z]", "def quaternion_from_two_vectors(self, v0: np.array, v1: np.array) -> np.quaternion:\n\n # if v0[0] == 0.0 and v0[1] == 0.0 and v0[2] == 0.0:\n # pass\n # else:\n # v0 = v0 / np.linalg.norm(v0)\n v0 = v0 / np.linalg.norm(v0)\n v1 = v1 / np.linalg.norm(v1)\n c = v0.dot(v1)\n # Epsilon prevents issues at poles.\n if c < (-1 + EPSILON):\n c = max(c, -1)\n m = np.stack([v0, v1], 0)\n _, _, vh = np.linalg.svd(m, full_matrices=True)\n axis = vh.T[:, 2]\n w2 = (1 + c) * 0.5\n w = np.sqrt(w2)\n axis = axis * np.sqrt(1 - w2)\n return np.quaternion(w, *axis)\n axis = np.cross(v0, v1)\n s = np.sqrt((1 + c) * 2)\n return np.quaternion(s * 0.5, *(axis / s))", "def qdist(q1: np.ndarray, q2: np.ndarray) -> float:\n _quaternions_guard_clauses(q1, q2)\n q1, q2 = np.copy(q1), np.copy(q2)\n if q1.ndim == 1:\n q1 /= np.linalg.norm(q1)\n q2 /= np.linalg.norm(q2)\n if np.allclose(q1, q2) or np.allclose(-q1, q2):\n return 0.0\n return min(np.linalg.norm(q1-q2), np.linalg.norm(q1+q2))\n q1 /= np.linalg.norm(q1, axis=1)[:, None]\n q2 /= np.linalg.norm(q2, axis=1)[:, None]\n return np.r_[[np.linalg.norm(q1-q2, axis=1)], [np.linalg.norm(q1+q2, axis=1)]].min(axis=0)", "def quaternion_multiplication(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar * b_scalar - a_vecx * b_vecx - a_vecy * b_vecy - a_vecz * b_vecz\n r_vecx = a_scalar * b_vecx + a_vecx * 
b_scalar + a_vecy * b_vecz - a_vecz * b_vecy\n r_vecy = a_scalar * b_vecy + a_vecy * b_scalar + a_vecz * b_vecx - a_vecx * b_vecz\n r_vecz = a_scalar * b_vecz + a_vecz * b_scalar + a_vecx * b_vecy - a_vecy * b_vecx\n\n \"\"\"\n a = torch.randn([2, 3, 4])\n b = torch.randn([2, 3, 4])\n print(a) # 2 matrices of size 3 x 4\n print(b) # 2 matrices of size 3 x 4\n print(torch.stack([a, b])) # 4 matrices of size 3 x 4, first a, then b\n \"\"\"\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )", "def compute_subspace_angles(S1, S2):\n # Check the if the input arrays are 1D or 2D\n if S1.ndim == 1:\n # mat1 = np.reshape(S1, (1,S1.size))\n mat1 = np.reshape(S1, (S1.size, 1))\n elif S1.ndim == 2:\n mat1 = S1\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n if S2.ndim == 1:\n # mat2 = np.reshape(S2, (1,S2.size))\n mat2 = np.reshape(S2, (S2.size, 1))\n elif S2.ndim == 2:\n mat2 = S2\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n\n\n # Do a QR Factorization of S1 and S2\n Q1, R1 = np.linalg.qr(mat1)\n # print('S1 = \\n', S1)\n # print('Q1 = \\n', Q1)\n Q2, R2 = np.linalg.qr(mat2)\n # print('S1 = \\n', S2)\n # print('Q2 = \\n', Q2)\n intmat = np.matmul(Q1.T, Q2)\n # print('intmat = \\n', intmat)\n Y, s, Z = np.linalg.svd(intmat)\n # print('Y = \\n', Y)\n # print('U = \\n', np.matmul(Q1, Y))\n # print('V = \\n', np.matmul(Q2, Y))\n # print('s = \\n', s)\n\n # NaN prevention check\n indices = np.where(s > 1) # Get the indices where the violation exisits\n for entry in indices: # Loop over these indices to fix the violation\n for i in entry:\n if s[i] - 1 < 1.e-13: # This violation limit is pulled out of thin air!\n s[i] = 1.0\n\n s_radians = np.arccos(s)\n\n return s_radians", "def quaternion_equal(v1=None, v2=None): # real signature unknown; restored from __doc__\n return 0", "def slerp(q1: Quaternion, q2: Quaternion, t: float):\n q1 = q1.normalize()\n q2 = q2.normalize()\n dot = q1.dot(q2)\n\n if dot < 0.0:\n # Quaternions have opposite-handedness, so slerp won't take the shortest path unless one is negated.\n q2 = -q2\n dot = -dot\n\n dot = min(1.0, max(0.0, dot))\n theta = math.acos(dot) * t\n q3 = q2 - q1 * dot\n return q1 * math.cos(theta) + q3 * math.sin(theta)", "def multiply(self, other: 'RotationQuaterion') -> 'RotationQuaterion':\n\n # TODO replace this with your own code\n #quaternion_product = solution.quaternion.RotationQuaterion.multiply(\n # self, other)\n\n eta_a = self.real_part\n epsilon_a = self.vec_part\n\n eta_b = other.real_part\n epsilon_b = other.vec_part\n epsilon_cross_prod = get_cross_matrix(epsilon_a) @ epsilon_b\n\n quatprod_real_part = eta_a*eta_b - epsilon_a@epsilon_b\n quatprod_vec_part = eta_b * epsilon_a + eta_a * epsilon_b + epsilon_cross_prod\n\n quaternion_product = RotationQuaterion(quatprod_real_part, quatprod_vec_part)\n\n return quaternion_product" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function.
def get_rotation_matrix_from_quaternion(q):
    R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],
                    2 * (q[1] * q[2] - q[0] * q[3]),
                    2 * (q[1] * q[3] + q[0] * q[2])],
                   [2 * (q[2] * q[1] + q[0] * q[3]),
                    q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],
                    2 * (q[2] * q[3] - q[0] * q[1])],
                   [2 * (q[3] * q[1] - q[0] * q[2]),
                    2 * (q[3] * q[2] + q[0] * q[1]),
                    q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])
    return R
[ "def quaternion_to_rotation_matrix(q):\n\n # Original C++ method ('SetQuaternionRotation()') is defined in\n # pba/src/pba/DataInterface.h.\n # Parallel bundle adjustment (pba) code (used by visualsfm) is provided\n # here: http://grail.cs.washington.edu/projects/mcba/\n qq = math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])\n if qq > 0: # Normalize the quaternion\n qw = q[0] / qq\n qx = q[1] / qq\n qy = q[2] / qq\n qz = q[3] / qq\n else:\n qw = 1\n qx = qy = qz = 0\n m = np.zeros((3, 3), dtype=float)\n m[0][0] = float(qw * qw + qx * qx - qz * qz - qy * qy)\n m[0][1] = float(2 * qx * qy - 2 * qz * qw)\n m[0][2] = float(2 * qy * qw + 2 * qz * qx)\n m[1][0] = float(2 * qx * qy + 2 * qw * qz)\n m[1][1] = float(qy * qy + qw * qw - qz * qz - qx * qx)\n m[1][2] = float(2 * qz * qy - 2 * qx * qw)\n m[2][0] = float(2 * qx * qz - 2 * qy * qw)\n m[2][1] = float(2 * qy * qz + 2 * qw * qx)\n m[2][2] = float(qz * qz + qw * qw - qy * qy - qx * qx)\n return m", "def quaternion_matrix(quaternion):\n _EPS = numpy.finfo(float).eps * 4.0\n\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n n = numpy.dot(q, q)\n if n < _EPS:\n return numpy.identity(4)\n q *= math.sqrt(2.0 / n)\n q = numpy.outer(q, q)\n return numpy.array([\n [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0],\n [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0],\n [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],\n [0.0, 0.0, 0.0, 1.0]])", "def quaternion_matrix(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= math.sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],\n [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],\n [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]]\n ])", "def quaternion_matrix(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n n = numpy.dot(q, q)\r\n if n < _EPS:\r\n return numpy.identity(4)\r\n q *= math.sqrt(2.0 / n)\r\n q = numpy.outer(q, q)\r\n return numpy.array([\r\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\r\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\r\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\r\n [ 0.0, 0.0, 0.0, 1.0]])", "def quaternion_matrix(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= math.sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])", "def mat4_from_quat(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]]).transpose()", "def rmatrixquaternion(q):\n assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)\n \n x, y, z, w = q\n\n xx = x*x\n xy = x*y\n xz = x*z\n xw = x*w\n yy = y*y\n yz = y*z\n yw = y*w\n zz = z*z\n zw = z*w\n\n r00 = 1.0 - 2.0 * (yy + zz)\n r01 = 2.0 * (xy - zw)\n r02 = 2.0 * 
(xz + yw)\n\n r10 = 2.0 * (xy + zw)\n r11 = 1.0 - 2.0 * (xx + zz) \n r12 = 2.0 * (yz - xw)\n\n r20 = 2.0 * (xz - yw)\n r21 = 2.0 * (yz + xw)\n r22 = 1.0 - 2.0 * (xx + yy)\n\n R = numpy.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, r22]], float)\n \n assert numpy.allclose(linalg.determinant(R), 1.0)\n return R", "def rotationFromQuaternion(*args):\n return _almathswig.rotationFromQuaternion(*args)", "def quat_rotation_matrix(q_w, q_x, q_y, q_z) -> np.array:\n return np.array([[1-2*(q_y**2+q_z**2), 2*(q_x*q_y-q_z*q_w), 2*(q_x*q_z + q_y*q_w)],\n [2*(q_x*q_y + q_z*q_w), 1-2 *\n (q_x**2+q_z**2), 2*(q_y*q_z - q_x*q_w)],\n [2*(q_x*q_z-q_y*q_w), 2*(q_y*q_z+q_x*q_w), 1-2*(q_x**2+q_y**2)]], dtype=object)", "def Rotation_fromQuaternion(*args):\n return _almathswig.Rotation_fromQuaternion(*args)", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _get_rotation(self, elem: MjcfElement) -> Quaternion:\n if _is_worldbody(elem) or elem.axisangle is None:\n return quaternions.qeye()\n axisangle = elem.axisangle\n return quaternions.axangle2quat(axisangle[0:3],\n self._maybe_to_radian(axisangle[3]))", "def get_rotation_as_quaternion(self):\n return self._quaternion", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def quaternion(self):\n q = np.zeros((4,), dtype=np.float64)\n q = quaternion_from_euler(self.rot.x, self.rot.y, self.rot.z, \"rxyz\")\n q /= math.sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3])\n return q", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def quaternion_from_rotmat(r):\n assert r.shape == (3,3)\n\n w_sq = (1 + r[0, 0] + r[1, 1] + r[2, 2]) / 4\n x_sq = (1 + r[0, 0] - r[1, 1] - r[2, 2]) / 4\n y_sq = (1 - r[0, 0] + r[1, 1] - r[2, 2]) / 4\n z_sq = (1 - r[0, 0] - r[1, 1] + r[2, 2]) / 4\n\n w = np.sqrt(w_sq)\n x = math.copysign(np.sqrt(x_sq), r[2, 1] - r[1, 2])\n y = math.copysign(np.sqrt(y_sq), r[0, 2] - r[2, 0])\n z = math.copysign(np.sqrt(z_sq), r[1, 0] - r[0, 1])\n return np.array([w, x, y, z])", "def get_rotation_as_rotation_mat(self):\n return self._rotation_mat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
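A minimal usage sketch of get_rotation_matrix_from_quaternion from the record above — an illustration only, assuming the function is in scope, numpy is imported as np, and the quaternion is unit-length in (w, x, y, z) order:

import math
import numpy as np

# 90-degree rotation about the z axis as a unit quaternion (w, x, y, z).
q = [math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)]
R = np.asarray(get_rotation_matrix_from_quaternion(q))

# Rotating the x unit vector should give the y unit vector.
print(R @ np.array([1.0, 0.0, 0.0]))   # ~ [0, 1, 0] up to floating-point round-off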
Calculates the geometrical center of a set of points.
def get_geom_center(coordlist):
    return sum(coordlist) / len(coordlist)
[ "def get_center(points):\r\n tot_x = 0\r\n tot_y = 0\r\n for point in points:\r\n tot_x += point[0]\r\n tot_y += point[1]\r\n\r\n x = int(tot_x / len(points))\r\n y = int(tot_y / len(points))\r\n return x, y", "def center(self):\n center_lat = (max((x[0] for x in self._points)) + min((x[0] for x in self._points))) / 2\n center_lng = (max((x[1] for x in self._points)) + min((x[1] for x in self._points))) / 2\n return center_lat, center_lng", "def centroid(points):\n points = numpy.array(points)\n num, dim = points.shape\n return numpy.add.reduce(points)/float(num)", "def pointcenter(x):\n return point(x)", "def center(self):\n if not hasattr(self, '_center'):\n self._center = np.unique(self.points, axis=0).mean(axis=0)\n return self._center", "def _find_centroid(points):\n\n x = [p[0] for p in points]\n y = [p[1] for p in points]\n n = len(points)\n \n centroid = [sum(x)/float(n),sum(y)/float(n)]\n\n return centroid", "def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y", "def getCenter(self):\n return Point.average(self.points)", "def centroid(self):\n A = 1 / (6*self.area)\n cx,cy = 0,0\n for ind in xrange(-1, len(self.vertices)-1):\n pi = self.vertices[ind]\n pii = self.vertices[ind+1]\n v = pi[0]*pii[1]-pii[0]*pi[1]\n cx += v*(pi[0] + pii[0])\n cy += v*(pi[1] + pii[1])\n return Point(simplify(A*cx), simplify(A*cy))", "def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])", "def center_coords(self):\n coords = set()\n for x in range(self.radius, self.container.width - self.radius):\n for y in range(self.radius, self.container.height - self.radius):\n coords.add((x, y))\n\n return coords", "def get_center_point(self):\n # midpoint formula\n return (self.box[0] + self.box[2]) * 0.5, (self.box[1] + self.box[3]) * 0.5", "def find_center(centers):\n return np.mean(centers, axis = 0)", "def incenter(self):\n s = self.sides\n v = self.vertices\n A,B,C = v[0],v[1],v[2]\n a,b,c = s[1].length,s[2].length,s[0].length\n x = simplify((a*A[0] + b*B[0] + c*C[0]) / (a+b+c))\n y = simplify((a*A[1] + b*B[1] + c*C[1]) / (a+b+c))\n return Point(x, y)", "def centroid(x,y):\n gx = np.mean(x)\n gy = np.mean(y)\n\n return gx,gy", "def centroid_points(points):\n p = float(len(points))\n x, y, z = zip(*points)\n return sum(x) / p, sum(y) / p, sum(z) / p", "def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...", "def compute_platform_center(self):\n base = self.platform_vertices[1] - self.platform_vertices[0] # base of triangle, vector\n x = np.linalg.norm(base) # base length, scalar\n m = self.platform_vertices[0] + base/2 # midpoint on the base, vector\n cm = x/(2*np.sqrt(3)) # length from m to center c, scalar\n cm_dir = self.platform_vertices[2] - m # direction to center from midpoint, vector\n cm_vec = cm_dir*cm/np.linalg.norm(cm_dir) # make cm_dir a unit vector and multiply by the length, vector\n c = m + cm_vec # center position, vector\n return c", "def get_center(self):\n return Point(int(self.x + self.width * 0.5), int(self.y + self.height * 0.5))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the geometrical center of the atoms in atomlist to the given point.
def move_center_to_point(atomlist, point):
    for atom in range(len(atomlist)):
        atomlist[atom] = atomlist[atom] - point
    return atomlist
[ "def recenter(self, point=(0, 0)):\n self.center = Point(*point)", "def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)", "def set_frame_center(self, point):\n self.frame_center.move_to(point)", "def centerOnPoint(self, point):\n\n inClass = point.__class__.__name__.lower()\n # check if we've been passed an OpenCV Point2f object\n if inClass == 'point2f':\n # convert the Point2f object to a simple list\n point = QPointF(point.x, point.y)\n\n # check if we've been passed a list\n elif inClass == 'list':\n # convert the Point2f object to a simple list\n point = QPointF(point[0], point[1])\n\n self.isZooming = True\n self.centerPoint = point\n self.centerOn(self.centerPoint)\n self.isZooming = False", "def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)", "def moveToCenter(self):\n if self.parent is not None:\n bounds = self.parent.bounds(dataBounds=True)\n if bounds is not None:\n center = (bounds[0] + bounds[1]) / 2.\n _logger.debug('Moving plane to center: %s', str(center))\n self.plane.point = center", "def pointcenter(x):\n return point(x)", "def scale_and_center(pointlist, center=False):\n\n flat_pointlist = list_of_pointlists2pointlist(pointlist)\n\n tmp = get_scale_and_center_parameters(flat_pointlist, center)\n factor, addx, addy = tmp[\"factor\"], tmp[\"addx\"], tmp[\"addy\"]\n minx, miny = tmp[\"minx\"], tmp[\"miny\"]\n\n for linenr, line in enumerate(pointlist):\n for key, p in enumerate(line):\n pointlist[linenr][key] = {\n \"x\": (p[\"x\"] - minx) * factor + addx,\n \"y\": (p[\"y\"] - miny) * factor + addy,\n }\n\n return pointlist", "def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return", "def recenter(self):\n self.x0 -= self.centroid", "def center_from_tuple(self, center):\n self.center = Point(*center)", "def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2", "def setCenter(self, c):\n if self.isLocked():\n raise RuntimeError, \"Setting center not allowed - object locked.\"\n _cp = self.__center\n if not isinstance(c, point.Point):\n raise TypeError, \"Invalid center point: \" + `type(c)`\n if _cp is not c:\n _cp.disconnect(self)\n _cp.freeUser(self)\n self.startChange('center_changed')\n self.__center = c\n self.endChange('center_changed')\n self.sendMessage('center_changed', _cp)\n c.connect('moved', self.__movePoint)\n c.connect('change_pending', self.__pointChangePending)\n c.connect('change_complete', self.__pointChangeComplete)\n c.storeUser(self)\n if abs(_cp.x - c.x) > 1e-10 or abs(_cp.y - c.y) > 1e-10:\n self.sendMessage('moved', _cp.x, _cp.y, self.__radius)\n self.modified()", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def centre_point( bb ):\n return aabb.centre_point( bb )", "def center_stroke(self, offset_x, offset_y):\n for point in self.points:\n point.x -= offset_x\n point.y -= offset_y", "def move_to_center(particles):\n particles.position -= particles.center_of_mass()\n particles.velocity -= particles.center_of_mass_velocity()", "def update_center(self): \r\n \r\n self.update_bbox()", "def setPosition(self, point, space='preTransform'):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
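A short illustrative sketch combining get_geom_center and move_center_to_point from the two records above, assuming both functions are in scope, numpy is imported as np, and the list holds plain coordinate arrays:

import numpy as np

coords = [np.array([1.0, 0.0, 0.0]),
          np.array([0.0, 1.0, 0.0]),
          np.array([0.0, 0.0, 1.0])]
center = get_geom_center(coords)                 # array([0.333..., 0.333..., 0.333...])
shifted = move_center_to_point(coords, center)   # shifts in place and returns the list
print(get_geom_center(shifted))                  # ~ [0, 0, 0] up to floating-point round-off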
Rotates the adp with its corresponding rotation matrix.
def rotate_adp(adp, rotmat):
    adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                     [float(adp[3]), float(adp[1]), float(adp[5])],
                     [float(adp[4]), float(adp[5]), float(adp[2])]])
    rotmatT = np.transpose(rotmat)
    adp = np.dot(rotmatT, adp)
    adp = np.dot(adp, rotmat)
    # print '=\n',adp,'\n-------------------------------------------------\n\n\n\n\n\n'
    adp = np.array(adp).flatten().tolist()
    return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
[ "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotation(self, params):\n return self._rotation(self._convertparams(params, self.arclentoparam))", "def mat4_rotate(angle, direction, point=None):\n sina = sin(angle)\n cosa = cos(angle)\n direction = vec3(direction[:3])\n # rotation matrix around unit vector\n R = np.diag([cosa, cosa, cosa])\n R += np.outer(direction, direction) * (1.0 - cosa)\n direction *= sina\n R += np.array( [[ 0.0, -direction[2], direction[1]],\n [ direction[2], 0.0, -direction[0]],\n [-direction[1], direction[0], 0.0]])\n M = np.identity(4)\n M[:3, :3] = R\n if point is not None:\n # rotation not around origin\n point = np.array(point[:3], dtype=np.float64, copy=False)\n M[:3, 3] = point - np.dot(R, point)\n return M.transpose()", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotation_pt(self, params):\n return self._rotation(self._convertparams(params, self.arclentoparam_pt))", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotation(self, params):\n pass", "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def rotationFromTransformInPlace(*args):\n return _almathswig.rotationFromTransformInPlace(*args)", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, 
i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def rotate(self):\n pass", "def applyRotation(*args):\n return _almathswig.applyRotation(*args)", "def transformFromRotation(*args):\n return _almathswig.transformFromRotation(*args)", "def _rotate(self):\n self.image = pg.transform.rotate(self.image, (self._attr[\"direction\"] - self._attr[\"reference\"]))\n self._attr[\"reference\"] = self._attr[\"direction\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
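A hedged sketch of calling rotate_adp from the record above, assuming the function is in scope and numpy is imported as np. The six components follow the (U11, U22, U33, U12, U13, U23) order implied by the matrix built inside the function; the example rotation is 90 degrees about z, which should swap U11 and U22:

import numpy as np

Rz = np.matrix([[0.0, -1.0, 0.0],
                [1.0,  0.0, 0.0],
                [0.0,  0.0, 1.0]])
adp = [0.02, 0.03, 0.04, 0.0, 0.0, 0.0]   # diagonal ADP, no cross terms
print(rotate_adp(adp, Rz))                # -> [0.03, 0.02, 0.04, 0.0, 0.0, 0.0]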
Returns the normal vector of a plane defined by the points p1, p2 and p3.
def get_normal_vector_of_plane(p1, p2, p3):
    v12 = np.array(p1) - np.array(p2)
    v13 = np.array(p1) - np.array(p3)
    nvec = np.cross(v12, v13)
    ## print 'norm: '+str(np.linalg.norm(nvec))
    return nvec / np.linalg.norm(nvec)
[ "def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()", "def normal(plane):\n return plane[:3].copy()", "def surface_normal(points, normalize=True):\n p1 = points[..., 0, :]\n p2 = points[..., 1, :]\n p3 = points[..., 2, :]\n normal = np.cross(p2 - p1, p3 - p1)\n return vg.normalize(normal) if normalize else normal", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def plane_3p(p1, p2, p3):\n v1 = p3 - p1\n v2 = p2 - p1\n cp = np.cross(v1, v2)\n a, b, c = cp\n d = - np.dot(cp, p3)\n return np.array([a, b, c, d])", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))", "def normal(point_one, point_two):\n return numpy.array([point_one[1] - point_two[1], point_two[0] - point_one[0]])", "def GetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint3_GetNormal(self, *args)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def normalAt(self, p):\n return (p - self.center).normalized()", "def define_plane(a1, a2, a3):\n p1 = np.array(a1.coords())\n p2 = np.array(a2.coords())\n p3 = np.array(a3.coords())\n cp = np.cross(p3 - p1, p2 - p1)\n a, b, c = cp\n d = -np.dot(cp, p3)\n return np.array([a, b, c, d])", "def normal(self):\n normal = np.array(\n [\n self.center[0],\n self.center[1],\n self.center[2]-1,\n self.center[3],\n ]\n )\n return normal", "def normal_triangle(triangle, unitized=True):\n assert len(triangle) == 3, \"Three points are required.\"\n a, b, c = triangle\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n if not unitized:\n return n\n lvec = length_vector(n)\n return n[0] / lvec, n[1] / lvec, n[2] / lvec", "def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n", "def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)", "def vector_norm(vec: carla.Vector3D) -> float:\n return math.sqrt(vec.x**2 + vec.y**2 + vec.z**2)", "def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length", "def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n\n # evaluate d\n d = np.dot(cp, p3)\n\n # thus, normal is given by\n plane = vtk.vtkPlane()\n origin = p1\n normal = normalize(np.array([a,b,c]))\n plane.SetOrigin(p1)\n plane.SetNormal(normal)\n\n if numpoints == 1:\n proj = [0,0,0]\n plane.ProjectPoint(points, origin, normal, proj)\n return proj\n else:\n projected_pts = np.zeros((numpoints, 3), dtype=float)\n\n for i in range(numpoints):\n proj = [0,0,0]\n plane.ProjectPoint(points[i], origin, normal, proj)\n projected_pts[i] = proj\n\n return 
projected_pts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
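A quick illustrative check of get_normal_vector_of_plane from the record above, assuming the function is in scope and numpy is imported as np; three points spanning the xy-plane should give a unit normal along z:

import numpy as np

p1, p2, p3 = [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]
print(get_normal_vector_of_plane(p1, p2, p3))   # -> [0. 0. 1.] for this point ordering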
Returns a list where every element is a list of three atom names. The second and third names are the closest neighbours of the first name. The arguments are a list as returned by frac_to_cart and the number of neighbours to be returned.
def get_closest_neighbours(atomlist, neighbours=2):
    print('atomlist', atomlist)
    neighbourlist = []
    for atom in atomlist:
        listline = [atom[0][0]]
        dists = []
        distsc = []
        for partner in atomlist:
            dists.append(np.linalg.norm(atom[1] - partner[1]))
            distsc.append(np.linalg.norm(atom[1] - partner[1]))
        dists.remove(min(dists))
        for _ in range(neighbours):
            if min(dists) < 2.5:
                listline.append(atomlist[distsc.index(min(dists))][0][0])
                dists.remove(min(dists))
            #listline.append(atomlist[distsc.index(min(dists))][0][0])
        neighbourlist.append(listline)
    return neighbourlist
[ "def get_neighbors(a):\n\t\t\tif a == 0: neighbors = [a+1]\n\t\t\telif a == N-1: neighbors = [a-1]\n\t\t\telse: neighbors = [a-1, a+1]\n\t\t\treturn neighbors", "def find_C_with_N_terminals(atoms):\n CNH3_list = []\n for _ in range(len(atoms)):\n name = atoms[_].get_atom_name()\n if name != 'C':\n continue\n nghs_c = atoms[_].get_ngh()\n nums_c, nghs_list_c = parse_atom_nghs(nghs_c, [\"N\"])\n for __ in nghs_list_c[\"N\"]:\n curr_nghs_c = atoms[__].get_ngh()\n curr_nums_c, curr_nghs_list_c = parse_atom_nghs(curr_nghs_c, ['H', \"C\"])\n if curr_nums_c[\"C\"] == 1 and (curr_nums_c[\"H\"] == 3 or curr_nums_c[\"H\"] == 2):\n CNH3_list.append(_)\n if curr_nums_c[\"C\"] == 2:\n for another_c in curr_nghs_list_c[\"C\"]:\n if another_c == _:\n continue\n another_nghs_c = atoms[another_c].get_ngh()\n another_nums_c, another_nghs_list_c = parse_atom_nghs(another_nghs_c, ['H', \"C\", \"O\"])\n if another_nums_c[\"O\"] == 1 and another_nums_c[\"H\"] == 0 and another_nums_c[\"C\"] == 1:\n third_nghs_c = atoms[another_nghs_list_c[\"C\"][0]].get_ngh()\n third_nums_c, third_nghs_list_c = parse_atom_nghs(third_nghs_c, ['H', \"C\", \"O\"])\n if third_nums_c[\"H\"] == 3:\n CNH3_list.append(_)\n return CNH3_list", "def computeNearestNeighbor(itemName, itemVector, items): \n distances = [] \n for otherItem in items: \n if otherItem != itemName: \n distance = manhattan(itemVector, items[otherItem]) \n distances.append((distance, otherItem)) \n # sort based on distance -- closest first \n distances.sort() \n return distances", "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist", "def computeNearestNeighbor(itemName, itemVector, items):\n # \"Chris Cagle/ I Breathe In. 
I Breathe Out\" [1, 5, 2.5, 1, 1, 5, 1]\n distances = []\n for otherItem in items:\n if otherItem != itemName:\n # print('itemVector =>', itemVector)\n # print('items[otherItem] =>', items[otherItem])\n distance = manhattan(itemVector, items[otherItem])\n distances.append((distance, otherItem))\n # sort based on distance -- closest first\n distances.sort()\n return distances", "def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]", "def find_neighbours(s, adjs):\n return list(\n map(lambda s_: s_['name'],\n filter(lambda s_: s_['name'] != s, adjs)))", "def nearest(self, *others: \"Vec3\", n=1):\n dists = [(self.dist(v), v) for v in others]\n return [pair[1] for pair in sorted(dists)[:n]]", "def getNeighborNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(x+1, y+1, z+1), (x+1, y, z+1), (x+1, y-1, z+1),\n (x, y+1, z+1), (x, y, z+1), (x, y-1, z+1),\n (x-1, y+1, z+1), (x-1, y, z+1), (x-1, y-1, z+1),\n (x+1, y+1, z-1), (x+1, y, z-1), (x+1, y-1, z-1),\n (x, y+1, z-1), (x, y, z-1), (x, y-1, z-1),\n (x-1, y+1, z-1), (x-1, y, z-1), (x-1, y-1, z-1),\n (x+1, y+1, z), (x+1, y, z), (x+1, y-1, z),\n (x, y+1, z), (x, y, z), (x, y-1, z),\n (x-1, y+1, z), (x-1, y, z), (x-1, y-1, z)]", "def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)", "def calc_distances(marker_list, rf_pairs):\n final_distance = [[marker_list[0], 0]]\n\n for i in range(1, len(marker_list)):\n cur_markers = [marker_list[i-1], marker_list[i]]\n for rf_pair in rf_pairs:\n if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:\n final_distance.append([cur_markers[1], rf_pairs[rf_pair]])\n break\n return final_distance", "def num_neighbors_by_atom(self):\n result = []\n for i in range(self.num_atoms()):\n result.append(len(self.get_adjacent_atoms(i)))\n return result", "def get_neighbour_cells(position: tuple) -> list:\n cells = []\n y_pos = position[0]\n x_pos = position[1]\n cells.append([x_pos + 1, y_pos])\n cells.append([x_pos + 1, y_pos + 1])\n cells.append([x_pos, y_pos + 1])\n cells.append([x_pos - 1, y_pos + 1])\n cells.append([x_pos - 1, y_pos])\n cells.append([x_pos - 1, y_pos - 1])\n cells.append([x_pos, y_pos - 1])\n cells.append([x_pos + 1, y_pos - 1])\n return cells", "def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist", "def nine_to_3x3(listy):\n new_side = []\n k = int(len(listy) / 3)\n \n for i in range(k):\n 
intermediate = []\n for j in range(3):\n intermediate.append(listy.pop(0))\n \n new_side.append(intermediate)\n return new_side", "def calculate_neighbours(self):\n mm_atoms = to_molmod(self)\n neighbours = mm_atoms.graph.neighbors\n neighbours_arr = [sorted(list(value)) for value in neighbours.itervalues()]\n self._neighbours = neighbours_arr", "def get_neighborlist(self):\n self.neighborlist = []\n for i in range(self.sites_number):\n center_site = self.sites[i]\n center_species = center_site.species\n self.neighborlist.append([{center_species: []}])\n self.others.append([])\n for j in range(self.sites_number):\n site_b = self.sites[j]\n if center_site == site_b:\n continue\n if np.linalg.norm(center_site.coords - site_b.coords) <= 1.0 and center_species == site_b.species:\n self.neighborlist[i][center_species].append({j:site_b})\n continue\n if np.linalg.norm(center_site.coords - site_b.coords) <= 2.0 and center_species == site_b.species:\n #Note: site at diagnal will be counted\n self.neighborlist[i][site_b.species].append({j:site_b})\n continue\n return self.neighborlist", "def n_closest_waters(coordinates, atom, oxygens, n):\n\n waters = []\n for i in range(n):\n index = find_closest_atom(atom, oxygens)\n closest_oxygen = oxygens[index]\n if closest_oxygen in coordinates:\n oxygen_index = coordinates.index(closest_oxygen)\n OT = coordinates[oxygen_index]\n HT1 = coordinates[oxygen_index+1]\n HT2 = coordinates[oxygen_index+2]\n water = [OT, HT1, HT2]\n waters.append(water)\n oxygens = remove_atom(oxygens, index)\n return waters", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
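A hedged sketch of the input layout get_closest_neighbours from the record above appears to expect: each entry pairs a (label, element number) tuple with a numpy coordinate array. The water-like labels and positions are invented for illustration, and the function also prints its input before returning:

import numpy as np

atomlist = [[('O1', 8), np.array([0.00, 0.00, 0.00])],
            [('H1', 1), np.array([0.96, 0.00, 0.00])],
            [('H2', 1), np.array([-0.24, 0.93, 0.00])]]
print(get_closest_neighbours(atomlist, neighbours=2))
# -> [['O1', 'H1', 'H2'], ['H1', 'O1', 'H2'], ['H2', 'O1', 'H1']]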
Calculates for every atom the distances to all other atoms in atomlist. Returns a list where every element is a list of all distances.
def calculate_distance_matrix(atomlist):
    distlist = []
    for atom in atomlist:
        atomdict = {}
        for partner in atomlist:
            if not str(int(partner[0][1])) in atomdict.keys():
                atomdict[str(int(partner[0][1]))] = []
                atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
            else:
                atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
                atomdict[str(int(partner[0][1]))].sort()

        distlist.append(atomdict)

    return distlist
[ "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def calc_all_dists(self, net):\n dist = []\n for ii in range(self.nnodes):\n dist.append(self.calc_dist(net, ii))\n return dist", "def getAllDistances(self):\n\n if self.distances is None:\n return None\n return self.distances[:self._distanceCount, :]", "def _calc_distance(self, data: np.ndarray):\r\n distances = []\r\n for c in self.centroid:\r\n distance = np.sum((data - c) * (data - c), axis=1)\r\n distances.append(distance)\r\n\r\n distances = np.array(distances)\r\n distances = distances.T\r\n return distances", "def get_molecule_vectors(molecule_atoms):\n\n cutOff = ase.neighborlist.natural_cutoffs(molecule_atoms)\n neighborList = neighborlist.NeighborList(\n cutOff, self_interaction=False, bothways=False)\n neighborList.update(molecule_atoms)\n matrix = neighborList.get_connectivity_matrix()\n rows, columns = matrix.nonzero()\n pair_indices = np.column_stack((rows,columns))\n\n return [molecule_atoms.get_distance(i,j,mic=True,vector=True) for i,j in pair_indices]", "def distances(interaction_list,positions):\n\n distance_list = []\n\n for interaction in interaction_list:\n if interaction[0] < len(positions) and interaction[1] < len(positions):\n distance_list.append(distance(positions[interaction[0]],positions[interaction[1]]))\n\n return(distance_list)", "def distances(self):\n if self._distances is None:\n self._distances = set()\n for i, p1 in enumerate(self):\n for p2 in self[i + 1:]:\n self._distances.add(dist(p1, p2))\n return self._distances", "def distances(self):\n # Distances between atoms within a same chain_id\n if self._coordinates.index.name == 'chain_id':\n # See https://stackoverflow.com/questions/48888843/pandas-use-diff-with-groupby\n # need to use reset_index to use diff with groupby\n return self._coordinates.reset_index().groupby(by='chain_id').diff().set_index(self._coordinates.index).dropna().pow(2).sum(axis='columns').pow(1/2)\n # Distances between successive atoms\n else:\n return self._coordinates.diff().dropna().pow(2).sum(axis='columns').pow(1/2)", "def 
get_distances(self):\n\t\tdistance = [[self.get_band_distance(row, col) for col in range(self.NUM_BANDS)] for row in range(self.NUM_BANDS)]\n\n\t\treturn distance", "def build_distance_matrix(self):\n coords = self.atomcoords\n self.distancematrix = np.zeros((len(coords), len(coords)))\n for i in range(len(coords)):\n for j in [x for x in range(len(coords)) if x > i]:\n self.distancematrix[i][j] = norm(coords[i] - coords[j])\n self.distancematrix[j][i] = self.distancematrix[i][j]", "def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist", "def calcDistortionList(work_list):\n distortion_list = []\n for swap in work_list:\n distortion_list.append(Cluster.calcDistortion(*swap)) # call calcDistortion with tuple expansion as args\n return distortion_list", "def _cluster_distances(traj, atom_selection):\n\n\tatom_pairs = list(combinations(atom_selection, 2))\n\tpairwise_distances = mdt.compute_distances(traj=traj, atom_pairs=atom_pairs)\n\n\treturn pairwise_distances", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def distances(self, trips: list[Trip]) -> list[float]:\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
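A similar hedged sketch for calculate_distance_matrix from the record above, reusing the same invented layout; each per-atom dict is keyed by the element number (as a string) and holds the distances to atoms of that element:

import numpy as np

atomlist = [[('O1', 8), np.array([0.00, 0.00, 0.00])],
            [('H1', 1), np.array([0.96, 0.00, 0.00])]]
print(calculate_distance_matrix(atomlist))
# -> [{'8': [0.0], '1': [~0.96]}, {'8': [~0.96], '1': [0.0]}]  (values are numpy floats)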
Calls read_coordinates and frac_to_cart for every name in fragmentnames, using each name as the path, and returns a dictionary where every return value of frac_to_cart is keyed to its fragment name.
def read_multiple_coordinates(fragmentnames):
    fragdict = {}
    for name in fragmentnames:
        path = name + '/'
        cell, pos = read_coordinates(path)
        atomlist = frac_to_cart(cell, pos)
        atomdict = {}
        for atom in atomlist:
            atomdict[atom[0][0]] = atom[1]
        fragdict[name] = atomlist
    return fragdict
[ "def load_info(self):\n info = dict()\n\n coordinates = {\n 'coordinates': open(self.temp_prefix, 'r'),\n }\n\n nb_files = {\n 'np': open(self.temp_prefix + 'np.vol', 'r'),\n 'fp': open(self.temp_prefix + 'pxpy.vol', 'r'),\n 'px': open(self.temp_prefix + 'px.vol', 'r'),\n 'py': open(self.temp_prefix + 'py.vol', 'r'),\n }\n\n for key in coordinates:\n for line in coordinates[key]:\n vertex_id = int(line.split()[0])\n pl = [float(number) for number in line.split()]\n info[vertex_id] = {key: np.array(pl[1:])}\n\n for key in nb_files:\n for line in nb_files[key]:\n pl = [int(number) for number in line.split()]\n vertex_id = pl[0]\n if key == 'fp' or key == 'np':\n info[vertex_id][key] = [nb for nb in pl[1:] if nb > 0]\n else:\n info[vertex_id][key] = [\n nb for nb in pl[1:] if nb > 0\n if nb not in info[vertex_id]['np']\n ]\n\n return info", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def read_coordinates(nameFile):\n\n dictionary = {} \n with open(nameFile, 'r') as _file:\n for line in _file:\n name_f = line.split(':')[0]\n name_f = name_f[1:len(name_f)-1]\n\n df = pd.DataFrame(columns = ['X','Y']) # \n coords = line.split(':')[1].split('),')\n coords = [coord.replace('(','').replace(')','').replace(';','').replace(' ','') for coord in coords]\n for j in range(len(coords)):\n df.loc[j]= coords[j].split(',')\n df = df.astype(float)\n\n dictionary[name_f] = df\n\n return dictionary", "def getSeqDict(parts, part_names):\n seqdict = {}\n for key in parts.keys():\n # read\n alignment_file = key + '.fasta'\n alignment_file = os.path.join(input_dir, alignment_file)\n print alignment_file\n if not os.path.isfile(alignment_file):\n print 'not file'\n part_names.pop(part_names.index(key))\n continue\n seqs = readSequences(alignment_file)\n # trim\n seqs = trimSequences(seqs)\n # add length of trimmed seq to parts dict\n parts[key]['length'] = len(seqs[0])\n for s in seqs:\n sp, seqid = s.id.split('__')\n if sp == 'sample':\n seqid = 'sample'\n if byid:\n group_factor = seqid\n else:\n group_factor = sp\n if group_factor in seqdict.keys():\n if key in seqdict[group_factor].keys():\n seqdict[group_factor][key].append(s)\n else:\n seqdict[group_factor][key] = [s]\n else:\n seqdict[group_factor] = {key: [s]}\n return(seqdict, part_names)", "def fragDictBasic():\n\n # Defining basic GCMS Dictionary\n # EMU name fragment name abbreviation, number of carbons, formula minus backbone, approved by antoniewicz, weight of derivatized fragment, formula of derivatized fragment\n Ala159 = labeling.GCMSfragment('alaL_2_3' ,'Alanine,159' ,'Ala159',2,'C6H20NSi' ,0,317,'C15H35NO2Si2')\n Ala85 = labeling.GCMSfragment('alaL_2_3' ,'Alanine,85' ,'Ala85' ,2,'C8H26NOSi2' ,1,317,'C15H35NO2Si2')\n Ala57 = labeling.GCMSfragment('alaL_1_2_3' ,'Alanine,57' ,'Ala57' ,3,'C8H26NO2Si2' ,1,317,'C15H35NO2Si2')\n Gly159 = labeling.GCMSfragment('gly_2' 
,'Glycine,159' ,'Gly159',1,'C6H18NSi' ,0,303,'C14H33NO2Si2')\n Gly85 = labeling.GCMSfragment('gly_2' ,'Glycine,85' ,'Gly85' ,1,'C8H24NOSi2' ,1,303,'C14H33NO2Si2')\n Gly57 = labeling.GCMSfragment('gly_1_2' ,'Glycine,57' ,'Gly57' ,2,'C8H24NO2Si2' ,1,303,'C14H33NO2Si2')\n Val85 = labeling.GCMSfragment('valL_2_3_4_5' ,'Valine,85' ,'Val85' ,4,'C8H30NOSi2' ,1,345,'C17H39NO2Si2')\n Val159 = labeling.GCMSfragment('valL_2_3_4_5' ,'Valine,159' ,'Val159',4,'C6H24NSi' ,0,345,'C17H39NO2Si2')\n Val57 = labeling.GCMSfragment('valL_1_2_3_4_5' ,'Valine,57' ,'Val57' ,5,'C8H30NO2Si2' ,1,345,'C17H39NO2Si2')\n Leu159 = labeling.GCMSfragment('leuL_2_3_4_5_6' ,'Leucine,159' ,'Leu159',5,'C6H26NSi' ,0,359,'C18H41NO2Si2')\n Leu85 = labeling.GCMSfragment('leuL_2_3_4_5_6' ,'Leucine,85' ,'Leu85' ,5,'C8H32NOSi2' ,1,359,'C18H41NO2Si2')\n Leu57 = labeling.GCMSfragment('leuL_1_2_3_4_5_6' ,'Leucine,57' ,'Leu57' ,6,'C8H32NO2Si2' ,0,359,'C18H41NO2Si2')\n Ile159 = labeling.GCMSfragment('ileL_2_3_4_5_6' ,'Isoleucine,159' ,'Ile159',5,'C6H26NSi' ,0,359,'C18H41NO2Si2')\n Ile85 = labeling.GCMSfragment('ileL_2_3_4_5_6' ,'Isoleucine,85' ,'Ile85' ,5,'C8H32NOSi2' ,1,359,'C18H41NO2Si2')\n Ile57 = labeling.GCMSfragment('ileL_1_2_3_4_5_6' ,'Isoleucine,57' ,'Ile57' ,6,'C8H32NO2Si2' ,0,359,'C18H41NO2Si2')\n Ser159 = labeling.GCMSfragment('serL_2_3' ,'Serine,159' ,'Ser159',2,'C12H34NOSi2' ,1,447,'C21H49NO3Si3')\n Ser85 = labeling.GCMSfragment('serL_2_3' ,'Serine,85' ,'Ser85' ,2,'C14H40NO2Si3',1,447,'C21H49NO3Si3')\n Ser57 = labeling.GCMSfragment('serL_1_2_3' ,'Serine,57' ,'Ser57' ,3,'C14H40NO3Si3',1,447,'C21H49NO3Si3')\n Thr85 = labeling.GCMSfragment('thrL_2_3_4' ,'Threonine,85' ,'Thr85' ,3,'C14H42NO2Si3',1,461,'C22H51NO3Si3')\n Thr57 = labeling.GCMSfragment('thrL_1_2_3_4' ,'Threonine,57' ,'Thr57' ,4,'C14H42NO3Si3',1,461,'C22H51NO3Si3')\n Met159 = labeling.GCMSfragment('metL_2_3_4_5' ,'Methionine,159' ,'Met159',4,'C6H24NSSi' ,1,377,'C17H39NO2SSi2')\n Met85 = labeling.GCMSfragment('metL_2_3_4_5' ,'Methionine,85' ,'Met85' ,4,'C8H30NOSSi2' ,1,377,'C17H39NO2SSi2')\n Met57 = labeling.GCMSfragment('metL_1_2_3_4_5' ,'Methionine,57' ,'Met57' ,5,'C8H30NO2SSi2',1,377,'C17H39NO2SSi2')\n Phe159 = labeling.GCMSfragment('pheL_2_3_4_5_6_7_8_9' ,'Phenylalanine,159','Phe159',8,'C6H24NSi' ,1,393,'C21H39NO2Si2')\n Phe85 = labeling.GCMSfragment('pheL_2_3_4_5_6_7_8_9' ,'Phenylalanine,85' ,'Phe85' ,8,'C8H30NOSi2' ,1,393,'C21H39NO2Si2')\n Phe57 = labeling.GCMSfragment('pheL_1_2_3_4_5_6_7_8_9','Phenylalanine,57' ,'Phe57' ,9,'C8H30NO2Si2' ,1,393,'C21H39NO2Si2')\n Phe91 = labeling.GCMSfragment('pheL_1_2' ,'Phenylalanine,91' ,'Phe91' ,2,'C12H33NO2Si2',1,393,'C21H39NO2Si2')\n Asp159 = labeling.GCMSfragment('aspL_2_3_4' ,'Aspartate,159' ,'Asp159',3,'C12H34NO2Si2',0,475,'C22H49NO4Si3')\n Asp85 = labeling.GCMSfragment('aspL_2_3_4' ,'Aspartate,85' ,'Asp85' ,3,'C14H40NO3Si3',1,475,'C22H49NO4Si3')\n Asp57 = labeling.GCMSfragment('aspL_1_2_3_4' ,'Aspartate,57' ,'Asp57' ,4,'C14H40NO4Si3',1,475,'C22H49NO4Si3')\n Asp173 = labeling.GCMSfragment('aspL_1_2' ,'Aspartate,173' ,'Asp173',2,'C12H32NO2Si2',1,475,'C22H49NO4Si3')\n Asp99 = labeling.GCMSfragment('aspL_1_2' ,'Aspartate,99' ,'Asp99' ,2,'C14H38NO3Si3',1,475,'C22H49NO4Si3')\n Glu159 = labeling.GCMSfragment('gluL_2_3_4_5' ,'Glutamate,159' ,'Glu159',4,'C12H36NO2Si2',1,489,'C23H51NO4Si3')\n Glu85 = labeling.GCMSfragment('gluL_2_3_4_5' ,'Glutamate,85' ,'Glu85' ,4,'C14H42NO3Si3',1,489,'C23H51NO4Si3')\n Glu57 = labeling.GCMSfragment('gluL_1_2_3_4_5' ,'Glutamate,57' ,'Glu57' ,5,'C14H42NO4Si3',1,489,'C23H51NO4Si3')\n Tyr57 = 
labeling.GCMSfragment('tyrL_1_2_3_4_5_6_7_8_9','Tyrosine,57' ,'Tyr57' ,9,'C14H44NO3Si3',0,523,'C27H53NO3Si3')\n Tyr159 = labeling.GCMSfragment('tyrL_2_3_4_5_6_7_8_9' ,'Tyrosine,159' ,'Tyr159',8,'C12H38NOSi2' ,0,523,'C27H53NO3Si3')\n Tyr221 = labeling.GCMSfragment('tyrL_1_2' ,'Tyrosine,221' ,'Tyr221',2,'C12H32NO2Si2',1,523,'C27H53NO3Si3')\n Pro159 = labeling.GCMSfragment('proL_2_3_4_5' ,'Proline,159' ,'Pro159',4,'C6H22NSi' ,0,343,'C17H37NO2Si2')\n\n Ala = labeling.LCMSfragment('alaL_1_2_3' ,'Ala,0' ,'Ala' ,3,'H5NO')\n Gly = labeling.LCMSfragment('gly_1_2' ,'Gly,0' ,'Gly' ,2,'H3NO')\n Val = labeling.LCMSfragment('valL_1_2_3_4_5' ,'Val,0' ,'Val' ,5,'H9NO')\n Leu = labeling.LCMSfragment('leuL_1_2_3_4_5_6' ,'Leu,0' ,'Leu' ,6,'H11NO')\n Ile = labeling.LCMSfragment('ileL_1_2_3_4_5_6' ,'Ile,0' ,'Ile' ,6,'H11NO')\n Ser = labeling.LCMSfragment('serL_1_2_3' ,'Ser,0' ,'Ser' ,3,'H5NO2')\n Thr = labeling.LCMSfragment('thrL_1_2_3_4' ,'Thr,0' ,'Thr' ,4,'H7NO2')\n Met = labeling.LCMSfragment('metL_1_2_3_4_5' ,'Met,0' ,'Met' ,5,'H9NOS')\n Phe = labeling.LCMSfragment('pheL_1_2_3_4_5_6_7_8_9','Phe,0' ,'Phe' ,9,'H9NO')\n Asp = labeling.LCMSfragment('aspL_1_2_3_4' ,'Asp,0' ,'Asp' ,4,'H5NO3')\n Glu = labeling.LCMSfragment('gluL_1_2_3_4_5' ,'Glu,0' ,'Glu' ,5,'H7NO3')\n Tyr = labeling.LCMSfragment('tyrL_1_2_3_4_5_6_7_8_9','Tyr,0' ,'Tyr' ,9,'H9NO2')\n Pro = labeling.LCMSfragment('proL_1_2_3_4_5' ,'Pro,0' ,'Pro' ,5,'H7NO')\n Lys = labeling.LCMSfragment('lysL_1_2_3_4_5_6' ,'Lys,0' ,'Lys' ,6,'H12N2O')\n Arg = labeling.LCMSfragment('argL_1_2_3_4_5_6' ,'Arg,0' ,'Arg' ,6,'H12N4O')\n His = labeling.LCMSfragment('hisL_1_2_3_4_5_6' ,'His,0' ,'His' ,6,'H7N3O')\n Trp = labeling.LCMSfragment('trpL_1_2_3_4_5_6_7_8_9_10_11','Trp,0' ,'Trp' ,11,'H10N2O')\n Cys = labeling.LCMSfragment('cysL_1_2_3' ,'Cys,0' ,'Cys' ,3,'H5NOS')\n Asn = labeling.LCMSfragment('asnL_1_2_3_4' ,'Asn,0' ,'Asn' ,4,'H6N2O2')\n Gln = labeling.LCMSfragment('glnL_1_2_3_4_5' ,'Gln,0' ,'Gln' ,5,'H8N2O2')\n\n Suc = labeling.LCMSfragment('succ_1_2_3_4' ,'succ,0' ,'succ' ,4,'H4O4') \n Mal = labeling.LCMSfragment('mal-L_1_2_3_4' ,'mal-L,0' ,'mal-L' ,4,'H4O5')\n Cit = labeling.LCMSfragment('cit_1_2_3_4_5_6' ,'cit,0' ,'cit' ,6,'H5O7') \n Pyr = labeling.LCMSfragment('pyr_1_2_3' ,'pyr,0' ,'pyr' ,3,'H3O3') \n Mev = labeling.LCMSfragment('mev-R_1_2_3_4_5_6' ,'mev-R,0' ,'mev-R' ,6,'H11O4') \n Malcoa= labeling.LCMSfragment('malcoa_1_2_3' ,'malcoa,0' ,'malcoa',3,'HN7O3') \n Akg = labeling.LCMSfragment('akg_1_2_3_4_5' ,'akg,0' ,'akg' ,5,'H4O5') \n Oaa = labeling.LCMSfragment('oaa_1_2_3_4' ,'oaa,0' ,'oaa' ,4,'H2O5') \n G6p = labeling.LCMSfragment('g6p_1_2_3_4_5_6' ,'g6p,0' ,'g6p' ,6,'H11O9P') \n Glx = labeling.LCMSfragment('glx_1_2' ,'glx,0' ,'glx' ,6,'H1O3') \n octa = labeling.LCMSfragment('octa_1_2_3_4_5_6_7_8' ,'octa,0' ,'octa' ,8,'H15O2') \n dca = labeling.LCMSfragment('dca_1_2_3_4_5_6_7_8_9_10','dca,0' ,'dca' ,10,'H20O2') \n ddca = labeling.LCMSfragment('ddca_1_2_3_4_5_6_7_8_9_10_11_12','mev-R,0' ,'ddca' ,12,'H23O2') \n\n \n fdp = labeling.LCMSfragment('fdp_1_2_3_4_5_6' ,'fdp,0' ,'fdp' ,6,'H10O2P' )\n dhap = labeling.LCMSfragment('dhap_1_2_3' ,'dhap,0' ,'dhap' ,3,'H5O6P' )\n M_3pg = labeling.LCMSfragment('3pg_1_2_3' ,'3pg,0' ,'3pg' ,3,'H4O7P' ) # We can't have a variable starting with a number\n pep = labeling.LCMSfragment('pep_1_2_3' ,'pep,0' ,'pep' ,3,'H2O6P' )\n pyr = labeling.LCMSfragment('pyr_1_2_3' ,'pyr,0' ,'pyr' ,3,'H3O3' )\n ru5p = labeling.LCMSfragment('ru5p-D_1_2_3_4_5' ,'ru5p-D,0' ,'ru5p-D',5,'H9O8P' )\n r5p = labeling.LCMSfragment('r5p_1_2_3_4_5' ,'r5p,0' 
,'r5p' ,5,'H9O8P' )\n s7p = labeling.LCMSfragment('s7p_1_2_3_4_5_6_7' ,'s7p,0' ,'s7p' ,7,'H13O10P')\n mal = labeling.LCMSfragment('mal-L_1_2_3_4' ,'mal-L,0' ,'mal-L' ,4,'H4O5' )\n\n allFragments = [Ala159,Ala85,Ala57,Gly159,Gly85,Gly57,Val85,Val159,Val57,Leu159,Leu85,Leu57,Ile159,Ile85,Ile57,\n Ser159,Ser85,Ser57,Thr85,Thr57,Met159,Met85,Met57,Phe159,Phe85,Phe57,Phe91,Asp159,Asp85,Asp57,\n Asp173,Asp99,Glu159,Glu85,Glu57,Tyr57,Tyr159,Tyr221,Pro159]\n\n allFragments.extend([Ala,Gly,Val,Leu,Ile,Ser,Thr,Met,Phe,Asp,Glu,Tyr,Pro,Lys,Arg,His,Trp,Cys,Asn,Gln])\n\n allFragments.extend([Suc,Mal,Cit,Pyr,Mev,Malcoa,Akg,Oaa,G6p,Glx,octa,dca,ddca])\n\n allFragments.extend([fdp,dhap,M_3pg,pep,pyr,ru5p,r5p,s7p,mal])\n\n labelFragDict = {}\n for frag in allFragments:\n # Converting names to SBML standard\n emu = core.EMU(frag.emu)\n #emu.convNameComp2Std()\n #frag.emu = emu.convNameStd2SBML()\n frag.emu = emu.getEmuInSBML()\n\n # Creating dictionary\n labelFragDict[frag.abbrev] = frag\n\n return labelFragDict", "def createFileDict(self, c: Cmdr) -> dict[str, Position]:\n d: dict[str, Position] = {}\n for p in c.all_positions():\n d[p.v.fileIndex] = p.copy()\n return d", "def readSurfaceGeo(b18path):\n if not os.path.isfile(b18path):\n print(\"b18 building file not found! Please check!\")\n pass\n else:\n b18file = open(b18path,\"r\")\n b18data = b18file.readlines()\n srfGeoBlock = getDataParagraph(\"_EXTENSION_BuildingGeometry_START_\", \"_EXTENSION_BuildingGeometry_END_\", b18data)\n #now get vertex's coordinate xyz\n vertexdict = dict() #{vertexID:[x,y,z]}\n srfbasicinfo = dict() #{surfaceID:[vertexID]}\n srfInfo = dict() #{surfaceID:[vertices coordinate]}\n for line in srfGeoBlock:\n dline = line.split()\n if \"vertex\" in dline:\n vertexdict[int(dline[1])] = [float(xyz) for xyz in dline[2:]] #{vertexID:[x,y,z]}\n if \"wall\" in dline or \"window\" in dline or \"floor\" in dline or \"ceiling\" in dline or \"roof\" in dline:\n srfbasicinfo[int(dline[1])] = [[int(nrID) for nrID in dline[2:]],dline[0]] #{surfaceID:[[vertexID],construction]}\n #print srfbasicinfo[int(dline[1])]\n for key in srfbasicinfo.keys():\n srfInfo[key] = []\n for vertices in srfbasicinfo[key][0]:\n srfInfo[key].append(vertexdict[vertices])\n b18file.close()\n return srfInfo,vertexdict,srfbasicinfo\n #actually only need srfInfo\n #just getting everything out for now, incase will need to use those", "def read_fragment_library(path_to_lib):\n # list of folders for each subpocket\n subpockets = [\"AP\", \"FP\", \"SE\", \"GA\", \"B1\", \"B2\", \"X\"]\n\n data = {}\n\n # iterate over subpockets\n for subpocket in subpockets:\n\n try:\n data[subpocket] = _read_subpocket_fragments(subpocket, path_to_lib)\n except OSError:\n pass\n\n return data", "def create_slice_coord_dict(path: Path or str) -> None:\n d = dict()\n for c in xlines:\n idx = CROSSLINE_3D == c\n lines = INLINE_3D[idx]\n x = sourceX[idx]\n y = sourceY[idx]\n\n ax = np.empty(len(ilines)) # len(ilines) = 651\n ax[:] = np.nan\n ay = np.empty(len(ilines))\n ay[:] = np.nan\n for l, xx, yy in zip(lines, x, y):\n ax[l - min(ilines)] = xx # min(ilines) = 100\n ay[l - min(ilines)] = yy\n if len(lines) < len(ilines):\n stepx = (-x.max() + x.min()) / (lines.max() - lines.min())\n stepy = (y.max() - y.min()) / (lines.max() - lines.min())\n for i in range(len(ax)): # using the fact that holes start in higher addresses\n if np.isnan(ax[i]):\n ax[i] = ax[i - 1] + stepx\n ay[i] = ay[i - 1] + stepy\n\n d.update({(c, i + min(ilines)): (xx, yy) for i, xx, yy in zip(range(len(ax)), ax, ay)})\n\n 
# create coord dictionary\n slice_coord_dict = {'iline': {}, 'xline': {}}\n for iline in ilines:\n slice_coord_dict['iline'][iline] = np.array([d[(xline, iline)] for xline in xlines])\n for xline in xlines:\n slice_coord_dict['xline'][xline] = np.array([d[(xline, iline)] for iline in ilines])\n\n with open(path, 'wb') as f:\n pickle.dump(slice_coord_dict, f)", "def load_fragGC_pickle(inFH):\n fojb = pickle.load(inFH)\n\n d = dict()\n for x in fojb:\n taxon_name = x[0]\n d[taxon_name] = dict()\n d[taxon_name]['fragLength'] = []\n d[taxon_name]['fragGC'] = []\n \n for scaf,v in x[1].items(): \n for z in v:\n # fragStart, fragLength, fragGC\n d[taxon_name]['fragLength'].append(z[1])\n d[taxon_name]['fragGC'].append(z[2]) \n return d", "def get_path_locs(self):\n f = open(self.pathlocs_filename)\n lines = f.readlines()\n locs = dict()\n names = dict()\n counter = 0\n index = 0\n for line in lines:\n if counter == 0:\n name = line.strip()\n pass\n elif counter == 1:\n start = int(line.strip()) - 1\n elif counter == 2:\n end = int(line.strip()) - 1\n # extend end to term length + 1\n end += self.termination_length\n locs[(start, end)] = index\n names[(start, end)] = name\n index += 1\n counter += 1\n counter = counter % 3\n self.locs = locs\n self.pathnames = names", "def get_chunks_on_disk(file: os.PathLike | str) -> dict:\n chunks = dict()\n file = Path(file)\n\n if file.suffix.lower() in [\".nc\", \".nc4\"]:\n with nc.Dataset(file) as ds:\n for v in ds.variables:\n chunks[v] = dict()\n for ii, dim in enumerate(ds[v].dimensions):\n chunks[v][dim] = ds[v].chunking()[ii]\n elif file.suffix.lower() == \"zarr\" and file.is_dir():\n with zarr.open(file, \"r\") as ds: # noqa\n for v in ds.arrays():\n # Check if variable is chunked\n if v[1]:\n chunks[v[0]] = v[1]\n else:\n raise NotImplementedError(f\"File type: {file.suffix}.\")\n return chunks", "def parse(self):\n result = {}\n # the resulting dictionary\n for part in self._parser.keys():\n data = [bloc for bloc in self._data if bloc[1:5] == part]\n if data:\n result[part] = self._parser[part](data)\n return result", "def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = 
get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' % name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks", "def fragment_to_keys(fragment):\n return fragment.strip(\"#\").strip(\"/\").split(\"/\")", "def _extractParamsFromFile(self,file_name):\n datafile = open(file_name,\"r\")\n params={}\n for line in datafile:\n name=''\n i=0\n while(line[i]!=' '):\n name+=line[i]\n i+=1\n i+=1\n number=''\n while(line[i]!='e' and line[i]!='\\n'):\n number+=line[i]\n i+=1\n number=float(number)\n if(line[i]=='e'):\n i+=1\n exponent=''\n while(line[i]!='\\n'):\n exponent+=line[i]\n i+=1\n exponent=float(exponent)\n params[name] = number*10**exponent\n else:\n params[name] = number\n datafile.close() \n return params", "def extract_segment_props(self):\n props = {}\n num_segments = int(self.general['force-scan-series.force-segments.count'])\n for segment in range(num_segments):\n segment_props = ForceArchive(self.file_path).read_properties(\n 'segments/{}/segment-header.properties'.format(segment))\n # noinspection SpellCheckingInspection\n name_jpk = segment_props['force-segment-header.name.name'].replace('-cellhesion200', '')\n normal_name = self.convert_segment_name(name_jpk)\n props[normal_name] = segment_props\n props[normal_name][\"name_jpk\"] = name_jpk\n props[normal_name][\"name\"] = normal_name\n props[normal_name][\"segment_number\"] = str(segment)\n\n return props", "def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield 
new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd", "def load_store(filename):\n result = {}\n # Open file\n with open(filename, 'r') as file:\n # Read first character\n char = file.read(1)\n while char:\n # ; defines a new point\n if char == \";\":\n # The next characters are of the form (x,y,e)\n char = file.read(1) # left bracket\n\n char = file.read(1) # x\n x = char\n char = file.read(1) # comma or second digit\n\n # This means x is a two digit number\n if char != ',':\n # Add the second digit and then cast\n x += char\n x = int(x)\n char = file.read(1) # Now read the comma\n else:\n # One digit number so just cast\n print(char)\n x = int(x)\n \n # Follow a similar process for y and e\n char = file.read(1) # y\n\n y = char\n char = file.read(1) # comma or second digit\n if char != ',':\n y += char\n y = int(y)\n char = file.read(1)\n else:\n y = int(y)\n\n char = file.read(1) # encoded product\n e = char\n char = file.read(1)\n if char != ')':\n e += char\n e = int(e)\n char = file.read(1)\n else:\n e = int(e)\n \n # Add to the dictionary\n coords = (x,y)\n result[(x,y)] = e\n\n char = file.read(1)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the compound name and the cell parameters from an xd.mas style file specified by 'path'.
def read_xd_master_file(path, errorpointer):
    filepointer = open(path, 'r')
    for line in filepointer.readlines():
        if 'TITLE' in line:
            compound_name = line.partition('!')[2].lstrip().rstrip()
        if 'CELL' in line:
            cell = [float(i) for i in line.split(" ") if '.' in i]
            break
    filepointer.close()
    try:
        return compound_name, cell
    except:
        errorpointer.write(path + '\n')
        return None, None
[ "def parse_stellar_parameters(path):\n\n basename = os.path.basename(path).split(\"_\")[-1]\n parent_folder = path.split(\"/\")[-2]\n \n teff, logg, mh = (float(each) for each in \\\n (basename[1:5], basename[6:10], basename[11:16].rstrip(\"x\")))\n alpha_mh = 0.4 if \"alpha04\" in parent_folder.lower() else 0.0\n\n return (teff, logg, mh, alpha_mh)", "def load_from_file(self, path):\n structure = None\n if re.search(\".pdb\", path):\n parser = PDBParser()\n else:\n parser = MMCIFParser()\n\n path = path.strip()\n model_id = os.path.basename(path)\n #if os.path.basename(path).split('.')[-1] == 'gz':\n # GZ = gzip.open(path, 'rb')\n # GZ.close()\n #else :\n\n structure = parser.get_structure(model_id, open_file( path ))\n header = parser.get_header()\n\n return structure, header", "def get_parameters_from_path(fontpath):\n family_name, style_name = get_names_from_path(fontpath)\n parameters = style_name.split('-')\n return parameters", "def getCellByPath(self, path):\n cellname=\"\"\n return cellname", "def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist", "def format_script_for_cell(path):\n header = '\\n# Cell content replaced by load magic replacement.\\n'\n with open(str(path), encoding='utf8') as f:\n solution = f.read()\n if not solution:\n raise RuntimeError('Solution {} has no content.'.format(path))\n return header + solution", "def get_inc_file(path):\n \n pat = re.compile(r'''PARAMETER\\s*\\((?P<name>[_\\w]*)\\s*=\\s*(?P<value>[\\+\\-\\ded]*)\\)''',\n re.I)\n \n out = {} \n for name, value in pat.findall(open(path).read()):\n orig_value = str(value)\n try:\n out[name.lower()] = float(value.replace('d','e'))\n except ValueError:\n out[name] = orig_value\n return out", "def Read_CSSR(filename):\n f = open(filename)\n#\n# First read unit cell\n#\n tokens = f.readline().split()\n if len(tokens) != 3: \n print \"Format mismatch -- first cell line\"\n sys.exit(1)\n a, b, c = map(float,tokens[:])\n tokens = f.readline().split()\n if len(tokens) < 3: \n print \"Format mismatch -- second cell line\"\n sys.exit(1)\n alpha, beta, gamma = map(float,tokens[0:3])\n\n cell = N.zeros((3,3),N.Float)\n\n alpha, beta, gamma = map(lambda x: x*pi/180.0, (alpha,beta,gamma))\n va = N.array((a,0.0,0.0),N.Float)\n vb = N.array((b*cos(gamma), b*sin(gamma), 0.0),N.Float)\n xxx = (cos(alpha)-cos(beta)*cos(gamma)) / sin(gamma)\n vc = N.array((c*cos(beta), c*xxx, c*sqrt(sin(beta)**2 - xxx**2)),N.Float)\n\n cell[0,:] = va[:]\n cell[1,:] = vb[:]\n cell[2,:] = vc[:]\n\n#\n# Now the atoms\n#\n tokens = f.readline().split()\n natoms = int(tokens[0])\n f.readline() # empty line\n\n crystal = Structure([])\n import re\n p = re.compile(\"[A-z]+\")\n for a in range(natoms):\n tokens = f.readline().split()\n number, tag, x, y, z = 
tokens[0:5]\n m = p.match(tag)\n if m:\n symbol = m.group()\n else:\n print \"Cannot match \", tag \n crystal.append(Atom(symbol, [float(x), float(y), float(z)]))\n\n crystal.SetUnitCell(cell)\n crystal.SetBoundaryConditions(periodic=True)\n\n return crystal", "def load_coordinates(path):\n\n with open(path, \"rt\") as fd:\n for line in fd:\n\n # Ignore comments\n line = line.split(\"#\")[0]\n\n # Ignore empty lines\n regexp = \"^\\s*$\"\n if re.match(regexp, line):\n continue\n\n kwargs = dict(float=\"([+-]?\\d+(\\.\\d+)(?:[eE][+\\-]?\\d+)*)\")\n regexp = (\n \"^\\s*\"\n \"(?P<ra>{float})\"\n \"\\s+\"\n \"(?P<dec>{float})\"\n \"(\"\n \"\\s+\"\n \"\\[\\s*(?P<pm_ra>{float})\\s*\\]\"\n \"\\s+\"\n \"\\[\\s*(?P<pm_dec>{float})\\s*\\]\"\n \")?\"\n \"\\s*$\"\n )\n\n match = re.match(regexp.format(**kwargs), line)\n\n if not match:\n msg = (\n \"Unable to parse line %r. Astronomical objects must be \"\n \"listed one per line with coordinate values in columns one \"\n \"(right ascension) and two (declination). Proper motions \"\n \"may be optionally specified in columns three (ra) and \"\n \"four (dec), surrounded by brackets -- but, in that case, \"\n \"both of them are required.\" % line\n )\n raise ValueError(msg)\n\n ra = float(match.group(\"ra\"))\n if not 0 <= ra < 360:\n msg = \"Right ascension '%s' not in range [0, 360[ degrees\"\n raise ValueError(msg % ra)\n\n dec = float(match.group(\"dec\"))\n if not -90 <= dec <= 90:\n msg = \"Declination '%s' not in range [-90, 90] degrees\"\n raise ValueError(msg % dec)\n\n pm_ra = match.group(\"pm_ra\")\n if pm_ra is not None:\n pm_ra = float(pm_ra)\n\n pm_dec = match.group(\"pm_dec\")\n if pm_dec is not None:\n pm_dec = float(pm_dec)\n\n yield ra, dec, pm_ra, pm_dec", "def open_file(path):\n book = xlrd.open_workbook(path)\n\n # print number of sheets\n print\n book.nsheets\n\n # print sheet names\n print\n book.sheet_names()\n\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n\n # read a row\n print\n first_sheet.row_values(0)\n\n # read a cell\n cell = first_sheet.cell(0, 0)\n print\n cell\n print\n cell.value\n\n # read a row slice\n print\n first_sheet.row_slice(rowx=0,\n start_colx=0,\n end_colx=2)", "def read_anfatec_params(path):\n file_descriptions = []\n spectra_descriptions = []\n scan_params = {}\n parameters = {}\n inside_description = False\n\n\n with io.open(path, 'r', encoding = \"ISO-8859-1\") as f:\n \n for i,row in enumerate(f): \n \n # Get rid of newline characters at the end of the line.\n row = row.strip()\n #check to make sure its not empty \n if row:\n # First line of the file is useless. 
We tell the reader to stop at ';'\n if row[0] == unicode(';'):\n continue\n \n # This string indicates that we have reached a channel description.\n if row.endswith('Begin') & row.startswith('File'):\n inside_description = True\n continue\n if row.endswith('SpectrumDescBegin'):\n inside_description = True\n continue\n if row.endswith('End') & row.startswith('File'):\n file_descriptions.append(parameters)\n parameters = {}\n inside_description = False\n if row.endswith('SpectrumDescEnd'):\n spectra_descriptions.append(parameters)\n parameters = {}\n \n #split between :; creates list of two elements \n split_row = row.split(':')\n \n for i, el in enumerate(split_row):\n split_row[i] = el.strip()\n \n # We want to save the channel parameters to a separate structure.\n if inside_description:\n parameters[split_row[0]] = split_row[-1]\n else:\n scan_params[split_row[0]] = split_row[-1]\n \n \n return scan_params, file_descriptions, spectra_descriptions", "def read_stellar_properties(path = os.path.join(HERE, '../inputs/stellar_properties.txt')):\n\n # Read in table of stellar types\n data = np.loadtxt(path, skiprows=19, dtype = str)\n\n # Parse\n stypes = data[:,0]\n masses = np.array(data[:,1], dtype=float)\n lums = np.array(data[:,2], dtype=float)\n rads = np.array(data[:,3], dtype=float)\n temps = np.array(data[:,4], dtype=float)\n mvs = np.array(data[:,6], dtype=float)\n\n # Construct dictionary\n dic = {\n \"stypes\" : stypes,\n \"masses\" : masses,\n \"lums\" : lums,\n \"rads\" : rads,\n \"temps\" : temps,\n \"mvs\" : mvs\n }\n\n return dic", "def get_cells(path):\n source = open(path).read()\n lines = source.splitlines()\n\n return get_nbpy_cells_recursively(lines)", "def syntax_from_path(path):\n\n extensions = {\n \".ttl\": \"turtle\",\n \".nt\": \"ntriples\",\n \".trig\": \"trig\",\n \".nq\": \"nquads\",\n }\n\n return extensions[os.path.splitext(path)[1]]", "def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw", "def get_parameters(fname):\r\n missing = np.nan\r\n c3d = ezc3d.c3d(fname).c3d_swig\r\n c = c3d.parameters().group\r\n units_all = {'Point' : c('POINT').parameter('UNITS').valuesAsString()[0],\r\n 'Mass': 'kg', 'Length': 'm', 'Time': 's', 'g': 9.80665}\r\n if c('POINT').isParameter('ANGLE_UNITS') and c('POINT').isParameter('FORCE_UNITS'):\r\n units_all.update({'Angle' : c('POINT').parameter('ANGLE_UNITS').valuesAsString()[0],\r\n 'Force' : c('POINT').parameter('FORCE_UNITS').valuesAsString()[0],\r\n 'Moment' : c('POINT').parameter('MOMENT_UNITS').valuesAsString()[0],\r\n 'Power' : c('POINT').parameter('POWER_UNITS').valuesAsString()[0]\r\n })\r\n else:\r\n units_all.update({'Angle' : '', 'Force' : '',\r\n 'Moment' : '', 'Power' : ''})\r\n print('{} does not have ANGLE_UNITS.'.format(fname))\r\n if units_all['Point'] == 'cm':\r\n scale = 
.01\r\n elif units_all['Point'] == 'mm':\r\n scale = .001\r\n else:\r\n scale = 1\r\n units_all['scale'] = scale\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('UNITS')):\r\n units_all.update(dict(zip(c('ANALYSIS').parameter('NAMES').\r\n valuesAsString(),\r\n c('ANALYSIS').parameter('UNITS').\r\n valuesAsString())))\r\n else:\r\n #print('{} does not have ANALYSIS.'.format(fname))\r\n pass\r\n LL, FL = {'L': np.nan, 'R': np.nan}, {'L': np.nan, 'R': np.nan}\r\n if c3d.parameters().isGroup('PROCESSING'):\r\n if c('PROCESSING').isParameter('Bodymass'):\r\n mass = np.round(c('PROCESSING').parameter('Bodymass').\r\n valuesAsDouble()[0], 3)\r\n if c('PROCESSING').isParameter('Height'):\r\n height = np.round(c('PROCESSING').parameter('Height').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['L'] = np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LLegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['R'] = np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('RLegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('RLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('LegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('LFootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('LFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('RFootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('RFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n else:\r\n mass, height = np.nan, np.nan\r\n\r\n rates = {'p': c3d.header().frameRate(),\r\n 'a': c3d.header().frameRate() * c3d.header().nbAnalogByFrame()}\r\n frames = {'p': [c3d.header().firstFrame(), c3d.header().lastFrame()],\r\n 'a': [c3d.header().firstFrame() * c3d.header().nbAnalogByFrame(),\r\n c3d.header().lastFrame() * c3d.header().nbAnalogByFrame()]}\r\n\r\n events = get_events(fname, missing=missing)\r\n\r\n param = {'filename': os.path.splitext(os.path.basename(fname))[0],\r\n 'mass': mass, 'height': height, 'LL': LL, 'FL': FL,\r\n 'units_all': units_all,\r\n 'rates': rates, 'frames': frames, 'events': events}\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('VALUES')):\r\n 
param.update(dict(zip(c('ANALYSIS').parameter('NAMES').valuesAsString(),\r\n np.round(c('ANALYSIS').parameter('VALUES').\r\n valuesAsDouble(), 3))))\r\n\r\n return param", "def read_seq_from_file(path: str):\r\n with open(path, \"r\") as f:\r\n raw = f.read()\r\n \r\n if (os.path.splitext(path)[1] == \".ape\"):\r\n # get name and shape\r\n raw_line0 = raw.split(\"\\n\")[0]\r\n name = re.findall(\"LOCUS.*[0-9]+ bp\", raw_line0)[0].split()[1]\r\n shape = \"circular\" if \"circular\" in raw_line0.split(\"bp\")[-1] else \"linear\"\r\n # split info\r\n features0, seq0 = raw.split(\"ORIGIN\")\r\n # get features\r\n if(\"misc_feature\" in features0):\r\n feature_info_list = features0.split(\"misc_feature\")[1:]\r\n feature_list = []\r\n for info in feature_info_list:\r\n try:\r\n feature = DNAfeature(info)\r\n if (len(feature) > 15):\r\n feature_list.append(feature)\r\n except:\r\n print(\"Exception happened during reading ape file.\")\r\n else:\r\n feature_list = []\r\n\r\n # get sequence\r\n seq = \"\".join(re.sub(\"[0-9 \\n/]+\", \"\", seq0[6:]))\r\n else:\r\n name = \"\"\r\n shape = \"linear\"\r\n seq = raw\r\n feature_list = []\r\n \r\n return seq, name, feature_list, shape", "def read_data(fname, style):\n\n headerSize = 0\n with open(fname, 'r') as f:\n line = \" \"\n\n while line and ('Atoms' not in line):\n headerSize += 1\n line = f.readline()\n\n headerSize += 1 # Catches blank line after 'Atoms' OR first comment line\n\n return np.genfromtxt(fname, skip_header=headerSize)", "def load_control_file(path, config):\n if check_file(path, config['control_file']):\n data = pd.read_excel(path / config['control_file'], config['main_workbook'], engine='openpyxl')\\\n .set_index('Id')\n stations = pd.read_excel(path / config['control_file'], config['stations_workbook'], engine='openpyxl')\\\n .set_index('StationId')\n summertime = pd.read_excel(path / config['control_file'], config['summertime_workbook'], engine='openpyxl')\n else:\n data = stations = summertime = None\n\n return data, stations, summertime" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the cell parameters from an 'xd.mas' file and the atomic positions from an 'xd.res' file. The function returns a list with the cell parameters and a dictionary that maps each atom name to its fractional coordinates.
def read_coordinates(path='', sort=True):
    maspointer = open(path + 'xd.mas', 'r')
    respointer = open(path + 'xd.res', 'r')

    positions = {}
    keylist = []  #Needed to keep the atomlist order. This is important for the frequency read function.
    for line in maspointer.readlines():
        if 'CELL ' in line:
            cell = [float(i) for i in line.split(" ") if '.' in i]
            break
    for line in respointer.readlines():
        if '(' in line and not '!' in line:
            coords = [float(i) for i in line.split(" ") if '.' in i]
            coords = coords[:-1]
            key = line.split(" ")[0]
            keylist.append(key)
            positions[key] = coords
    if sort:
        sortkeylist = []
        for i in xrange(len(keylist)):
            j = i + 1
            for key in keylist:
                number = get_number(key)
                if j == int(number):
                    sortkeylist.append(key)
    else:
        sortkeylist = keylist
    return cell, positions, sortkeylist
[ "def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config", "def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates", "def ReadAtomParameter(AtomParameterPath):\r\n\r\n AtomParameter=os.path.join(AtomParameterPath,'AtomParameter')\r\n\r\n Key1,Key2,Key3=False,False,False\r\n MaterialAtomDictionary,GasAtomDictionary,MassDictionary={},{},{}\r\n SpecialPair,SpecialPairList=[],[]\r\n\r\n with open(AtomParameter, 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList=Line.strip().split()\r\n if WordList[0]=='#':\r\n continue\r\n elif WordList[0]=='MaterialAtom:':\r\n Key1=True\r\n elif WordList[0]=='GasAtom:':\r\n Key1=False\r\n Key2=True\r\n elif WordList[0]=='SpecialPair:':\r\n Key2=False\r\n Key3=True\r\n\r\n # MaterialAtom\r\n elif Key1==True and WordList[0]!='Number':\r\n MaterialAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[5]\r\n elif Key2==True and WordList[0]!='Number':\r\n GasAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[4]\r\n elif Key3==True and WordList[0]!='Number':\r\n SpecialPair.append(WordList[1:3])\r\n SpecialPair.append(WordList[3:5])\r\n\r\n SpecialPairList.append(SpecialPair)\r\n\r\n return MaterialAtomDictionary,GasAtomDictionary,SpecialPairList,MassDictionary", "def read(self, FN, natoms=None, return_title=False, \\\n multiplier=None, trajectory=False):\n if not os.path.isfile(FN):\n raise Exception('Coordinate file %s does not exist!' 
% FN)\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'r')\n else:\n F = open(FN, 'r')\n dat = F.read().strip().split('\\n')\n F.close()\n\n title = dat.pop(0) # Title\n\n if len(dat[0].split()) > 1:\n # VMD format (does not specify number of atoms)\n crd = []\n for line in dat:\n crd = crd + [float(x) for x in line.split()]\n crd = np.resize(crd, (len(crd) / 3, 3))\n else:\n # AMBER format\n file_natoms = int(dat.pop(0)) # Number of atoms\n if (natoms is not None) and (file_natoms != natoms):\n print \"Incorrect number of atoms in crd file\"\n return np.array([])\n\n if trajectory:\n w = 8 # For mdcrd\n else:\n w = 12 # For inpcrd\n crd = []\n for line in dat:\n crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]\n crd = np.resize(crd, (len(crd) / 3, 3))\n\n if multiplier is not None:\n crd = multiplier * crd\n if (natoms is not None):\n crd = np.vsplit(crd, crd.shape[0] / natoms)\n print \" read %d configurations from %s\" % (len(crd), FN)\n\n if return_title:\n return (crd, title)\n else:\n return crd", "def open_E_atoms():\n filename = 'E_atom.txt'\n # todo calculate for each atom\n atom_E = {}\n with open(filename) as fh:\n next(fh) # ignore header\n for line in fh:\n atom, E = line.split()\n atom_E[atom] = float(E)\n fh.close()\n return atom_E", "def atoms_coords(pdb_file):\n\n\twith open(pdb_file,\"r\") as my_seq:\n\t\tcoords = []\n\t\tfor line in my_seq:\n\t\t\tif line[0:4] == \"ATOM\":\n\t\t\t\tdict_atom = {}\n\t\t\t\tdict_atom[\"atom\"] = str(line[77:99].strip())\n\t\t\t\tdict_atom[\"residu\"] = str(line[17:21].strip())\n\t\t\t\tdict_atom[\"N° resid\"] = int(line[22:26].strip())\n\t\t\t\tdict_atom[\"x\"] = float(line[22:26].strip())\n\t\t\t\tdict_atom[\"y\"] = float(line[38:46].strip())\n\t\t\t\tdict_atom[\"z\"] = float(line[46:54].strip())\n\t\t\t\tcoords.append(dict_atom)\n\t\tdata_atom = pd.DataFrame(coords)\n\treturn data_atom", "def get_coordinates_pdb(filename):\n # PDB files tend to be a bit of a mess. The x, y and z coordinates\n # are supposed to be in column 31-38, 39-46 and 47-54, but this is\n # not always the case.\n # Because of this the three first columns containing a decimal is used.\n # Since the format doesn't require a space between columns, we use the\n # above column indices as a fallback.\n x_column = None\n V = list()\n # Same with atoms and atom naming.\n # The most robust way to do this is probably\n # to assume that the atomtype is given in column 3.\n atoms = list()\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line.startswith(\"TER\") or line.startswith(\"END\"):\n break\n if line.startswith(\"ATOM\"):\n tokens = line.split()\n # Try to get the atomtype\n try:\n atom = tokens[2][0]\n if atom in (\"H\", \"C\", \"N\", \"O\", \"S\", \"P\"):\n atoms.append(atom)\n else:\n # e.g. 
1HD1\n atom = tokens[2][1]\n if atom == \"H\":\n atoms.append(atom)\n else:\n raise Exception\n except:\n exit(\"Error parsing atomtype for the following line: \\n{0:s}\".format(line))\n\n if x_column == None:\n try:\n # look for x column\n for i, x in enumerate(tokens):\n if \".\" in x and \".\" in tokens[i + 1] and \".\" in tokens[i + 2]:\n x_column = i\n break\n except IndexError:\n exit(\"Error parsing coordinates for the following line: \\n{0:s}\".format(line))\n # Try to read the coordinates\n try:\n V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))\n except:\n # If that doesn't work, use hardcoded indices\n try:\n x = line[30:38]\n y = line[38:46]\n z = line[46:54]\n V.append(np.asarray([x, y ,z], dtype=float))\n except:\n exit(\"Error parsing input for the following line: \\n{0:s}\".format(line))\n\n\n V = np.asarray(V)\n atoms = np.asarray(atoms)\n assert(V.shape[0] == atoms.size)\n return atoms, V", "def read_parameters_diff_file(coords):\n param_map = hp.read_map(source +\n \"kids_data/\"\n \"COM_CompMap_Compton-SZMap-milca-\"\n \"ymaps_2048_R2.00.fits\")\n params = []\n for point in coords:\n ra, dec = point\n index = declratoindex(dec, ra)\n params.append(param_map[index])\n return params", "def get_parameters(fname):\r\n missing = np.nan\r\n c3d = ezc3d.c3d(fname).c3d_swig\r\n c = c3d.parameters().group\r\n units_all = {'Point' : c('POINT').parameter('UNITS').valuesAsString()[0],\r\n 'Mass': 'kg', 'Length': 'm', 'Time': 's', 'g': 9.80665}\r\n if c('POINT').isParameter('ANGLE_UNITS') and c('POINT').isParameter('FORCE_UNITS'):\r\n units_all.update({'Angle' : c('POINT').parameter('ANGLE_UNITS').valuesAsString()[0],\r\n 'Force' : c('POINT').parameter('FORCE_UNITS').valuesAsString()[0],\r\n 'Moment' : c('POINT').parameter('MOMENT_UNITS').valuesAsString()[0],\r\n 'Power' : c('POINT').parameter('POWER_UNITS').valuesAsString()[0]\r\n })\r\n else:\r\n units_all.update({'Angle' : '', 'Force' : '',\r\n 'Moment' : '', 'Power' : ''})\r\n print('{} does not have ANGLE_UNITS.'.format(fname))\r\n if units_all['Point'] == 'cm':\r\n scale = .01\r\n elif units_all['Point'] == 'mm':\r\n scale = .001\r\n else:\r\n scale = 1\r\n units_all['scale'] = scale\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('UNITS')):\r\n units_all.update(dict(zip(c('ANALYSIS').parameter('NAMES').\r\n valuesAsString(),\r\n c('ANALYSIS').parameter('UNITS').\r\n valuesAsString())))\r\n else:\r\n #print('{} does not have ANALYSIS.'.format(fname))\r\n pass\r\n LL, FL = {'L': np.nan, 'R': np.nan}, {'L': np.nan, 'R': np.nan}\r\n if c3d.parameters().isGroup('PROCESSING'):\r\n if c('PROCESSING').isParameter('Bodymass'):\r\n mass = np.round(c('PROCESSING').parameter('Bodymass').\r\n valuesAsDouble()[0], 3)\r\n if c('PROCESSING').isParameter('Height'):\r\n height = np.round(c('PROCESSING').parameter('Height').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['L'] = np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LLegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LegLength').\r\n 
valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['R'] = np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('RLegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('RLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('LegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('LFootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('LFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('RFootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('RFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n else:\r\n mass, height = np.nan, np.nan\r\n\r\n rates = {'p': c3d.header().frameRate(),\r\n 'a': c3d.header().frameRate() * c3d.header().nbAnalogByFrame()}\r\n frames = {'p': [c3d.header().firstFrame(), c3d.header().lastFrame()],\r\n 'a': [c3d.header().firstFrame() * c3d.header().nbAnalogByFrame(),\r\n c3d.header().lastFrame() * c3d.header().nbAnalogByFrame()]}\r\n\r\n events = get_events(fname, missing=missing)\r\n\r\n param = {'filename': os.path.splitext(os.path.basename(fname))[0],\r\n 'mass': mass, 'height': height, 'LL': LL, 'FL': FL,\r\n 'units_all': units_all,\r\n 'rates': rates, 'frames': frames, 'events': events}\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('VALUES')):\r\n param.update(dict(zip(c('ANALYSIS').parameter('NAMES').valuesAsString(),\r\n np.round(c('ANALYSIS').parameter('VALUES').\r\n valuesAsDouble(), 3))))\r\n\r\n return param", "def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = 
smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties", "def read_xd_master_file(path, errorpointer):\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None", "def _extractParamsFromFile(self,file_name):\n datafile = open(file_name,\"r\")\n params={}\n for line in datafile:\n name=''\n i=0\n while(line[i]!=' '):\n name+=line[i]\n i+=1\n i+=1\n number=''\n while(line[i]!='e' and line[i]!='\\n'):\n number+=line[i]\n i+=1\n number=float(number)\n if(line[i]=='e'):\n i+=1\n exponent=''\n while(line[i]!='\\n'):\n exponent+=line[i]\n i+=1\n exponent=float(exponent)\n params[name] = number*10**exponent\n else:\n params[name] = number\n datafile.close() \n return params", "def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n print(\"Could not obtain the number of atoms in the .xyz file. \"+filename)\n return None\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n # atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V", "def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n exit(\"Could not obtain the number of atoms in the .xyz file.\")\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. 
Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V", "def read_den_file(file_name: str):\n with open(file_name, \"r\") as fid:\n l_content = fid.readlines()\n\n number_lines = int(l_content[1])\n\n hh = l_content[number_lines+2].strip().split()\n rad = float(numpy.pi/180.)\n cell_parameters = numpy.array([float(hh[0]), float(hh[1]), float(hh[2]), \n float(hh[3])*rad, float(hh[4])*rad, float(hh[5])*rad],\n dtype=float)\n\n [points_a, points_b, points_c, n_el_symm, centr, n_shift] = [\n int(hh) for hh in l_content[number_lines+3][:-1].split()]\n points_abc = numpy.array((points_a, points_b, points_c), dtype=int)\n centrosymmetry = bool(centr)\n\n l_ind_xyz = []\n l_dens = []\n for line in l_content[2:number_lines+2]:\n hh = line.strip().split()\n ind_x, ind_y, ind_z = int(hh[0]), int(hh[1]), int(hh[2])\n den_f, den_a = 0., 0.\n den = float(hh[3])\n if len(hh) == 6:\n den_f = float(hh[4])\n den_a = float(hh[5])\n elif den >= 0.:\n den_f = den\n else: # den < 0 \n den_a = den\n l_ind_xyz.append((ind_x, ind_y, ind_z))\n l_dens.append((den, den_f, den_a))\n indexes_xyz = numpy.array(l_ind_xyz, dtype=int).transpose()\n densities = numpy.array(l_dens, dtype=float).transpose()\n\n r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33 = [], [], [], [], \\\n [], [], [], [], []\n b_1, b_2, b_3 = [], [], []\n for line in l_content[number_lines+4:number_lines+4+n_el_symm]:\n hh = line.replace(\"-\", \" -\").strip().split()\n r_11.append(int(hh[0]))\n r_12.append(int(hh[3]))\n r_13.append(int(hh[6]))\n \n r_21.append(int(hh[1]))\n r_22.append(int(hh[4]))\n r_23.append(int(hh[7]))\n\n r_31.append(int(hh[2]))\n r_32.append(int(hh[5]))\n r_33.append(int(hh[8]))\n\n b_1.append(float(hh[9]))\n b_2.append(float(hh[10]))\n b_3.append(float(hh[11]))\n\n b_i = (b_1, b_2, b_3)\n r_ij = (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33)\n\n sym_elems = form_symm_elems_by_b_i_r_ij(b_i, r_ij)\n\n shift_1, shift_2, shift_3 = [], [], []\n for line in l_content[number_lines+4+n_el_symm:\n number_lines+4+n_el_symm+n_shift]:\n hh = line.strip().split()\n shift_1.append(float(hh[0]))\n shift_2.append(float(hh[1]))\n shift_3.append(float(hh[2]))\n\n sh_num_x, sh_num_y, sh_num_z, sh_den = calc_numerators_denominator_for_b_i(\n shift_1, shift_2, shift_3)\n\n np_shift = numpy.stack((sh_num_x, sh_num_y, sh_num_z, sh_den), axis=0)\n\n return points_abc, cell_parameters, sym_elems, indexes_xyz, densities, \\\n centrosymmetry, np_shift", "def read_in_molecule_data(filename):\r\n n, atoms = read_xyzfile(filename)\r\n Molecule = molecule(atoms)\r\n return Molecule, n", "def get_params_from_input_file(filename):\n from landlab.core import load_params\n\n mpd_params = load_params(filename)\n return mpd_params", "def load_xyz(filename):\n periodic = load_periodic()\n #read molecule\n with open(filename) as f:\n size = int(next(f))\n title = next(f).strip()\n molecule = Molecule(title,size)\n for _ in range(size):\n row = next(f).split()\n tag = row[0]\n element = periodic[tag]\n coordinate = []\n for j in range(3):\n coordinate.append(float(row[j+1]))\n atom = Atom(element,coordinate)\n\n molecule.append(atom)\n f.close()\n \n return molecule", "def read_multiple_coordinates(fragmentnames):\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number in the brackets of an atomname.
def get_number(atomname):
    switch = False
    number = ''
    for char in atomname:
        if char == ')':
            switch = False
        if switch:
            number += char
        if char == '(':
            switch = True
    return number
[ "def int_atom(atom):\n global __ATOM_LIST__\n #print(atom)\n atom = atom.lower()\n return __ATOM_LIST__.index(atom) + 1", "def get_number(name):\n try:\n num = int(re.findall(\"[0-9]+\", name)[0])\n except:\n num = -1\n return num", "def get_tag_number(element):\n reg = re.compile(r'(\\d+)')\n num = int(re.search(reg, element).group())\n return num", "def get_index_from_tensor_name(tensor_name):\n return int(tensor_name.split(':')[1])", "def ParseUniformName(name):\n temp = re.match(r'(\\w+)(?:\\[(\\d+)\\])?', name)\n base = temp.group(1)\n count = temp.group(2)\n return base, count", "def numberOfName(self, name):\n for i, par in enumerate(self.names):\n if par.name == name: return i\n return -1", "def _format_receptor_name(self, receptor_name):\n if receptor_name.startswith(\"m\"):\n receptor_number = int(receptor_name.replace(\"m\", \"\"))\n else:\n receptor_number = int(receptor_name)\n return receptor_number", "def get_amount_of_digits(self, name: str):\n x = -1\n while name[x - 1].isdigit():\n x -= 1\n if name[:x].endswith(\"/streaming/p\"):\n return x", "def get_atom_intention(self, atom_name):\n source, _clone = self._atomdetail_by_name(atom_name)\n return source.intention", "def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1", "def get_number_from_symbol(symbol):\n return elements[symbol]['number']", "def _id_from_atom(cls, atom_element):\n return atom_element.text.rsplit('/', 1)[1]", "def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)", "def get_contract_number(contract_name):\n return int(contract_name.split(\"_\")[0])", "def extractNum(self):\n return other.NameParser(self).extractNum()", "def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None", "def get_atom_index(self, atom):\n return self.atom_indices[atom]", "def get_angle_bracket_count(string):\n string_count = string.count('<') + string.count('>')\n return string_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and init a conv1d layer with spectral normalization
def _conv1d_spect(ni, no, ks=1, stride=1, padding=0, bias=False):
    conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
    nn.init.kaiming_normal_(conv.weight)
    if bias:
        conv.bias.data.zero_()
    return spectral_norm(conv)
[ "def __conv_initialize(layer: nn.Module) -> None:\n if type(layer) == nn.Conv2d:\n nn.init.kaiming_normal_(layer.weight)", "def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. / n))", "def test_conv1d():\n filters = 3\n kernel_size = 2\n strides = 1\n batch_size = 2\n in_channels = 3\n input_size = 5\n input_shape = (batch_size, input_size, in_channels)\n\n keras_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=True, bias_initializer=\"ones\")\n input_layer = keras.Input(batch_shape=input_shape)\n keras_model = keras.models.Model(input=input_layer, outputs=keras_layer(input_layer))\n\n new_weights = np.arange(18).reshape(2, 3, 3)\n keras_layer.set_weights([new_weights, keras_layer.get_weights()[1]])\n\n kinput = np.arange(batch_size * input_size * in_channels).reshape(input_shape)\n kout = keras_model.predict(kinput)\n\n torch_model, _ = translate.translate_layer(keras_layer)\n tinput = torch.Tensor(kinput).permute(0, 2, 1)\n tout = torch_model(tinput).permute(0, 2, 1)\n assert np.isclose(kout, tout.cpu().data.numpy()).all()", "def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)", "def SNConv2d(*args, **kwargs):\n return spectral_norm(nn.Conv2d(*args, **kwargs))", "def __conv_initialize(layer: nn.Module) -> None:\n if type(layer) == nn.Linear:\n nn.init.kaiming_normal_(layer.weight)", "def _init_conv_weights(self):\n normal_init(self.conv)", "def conv_spectral_norm(module, image_width, image_height, name='weight', to_norm=1, n_power_iterations=1, eps=1e-12, ):\n ConvSpectralNorm.apply(module, image_width, image_height, name, to_norm, n_power_iterations, eps,)\n return module", "def __init__(self, in_channels, out_channels, kernel_size, dilation, causal=False, weight_init='none', normalization='weight', nonlinearity='linear'):\n super(C, self).__init__()\n self.causal = causal\n if causal:\n self.padding = (kernel_size - 1) * dilation\n else:\n self.padding = (kernel_size - 1) * dilation // 2\n\n self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,\n stride=1, # paper: 'The stride of convolution is always 1.'\n padding=self.padding, dilation=dilation)\n\n if normalization == 'weight':\n self.conv = nn.utils.weight_norm(self.conv)\n elif normalization == 'layer':\n self.layer_norm = LayerNorm(out_channels)\n\n self.nonlinearity = nonlinearity\n if weight_init == 'kaiming':\n nn.init.kaiming_normal_(self.conv.weight, mode='fan_out', nonlinearity=nonlinearity)\n elif weight_init == 'xavier':\n nn.init.xavier_uniform_(self.conv.weight, nn.init.calculate_gain(nonlinearity))", "def convolution(prev_layer, n_filters, hype_space, force_ksize=None):\n if force_ksize is not None:\n k = force_ksize\n else:\n k = int(round(hype_space['conv_kernel_size']))\n return keras.layers.convolutional.Conv1D(\n filters=n_filters, kernel_size=k, strides=1,\n padding='same', activation=hype_space['activation'],\n kernel_regularizer=keras.regularizers.l2(\n STARTING_L2_REG * hype_space['l2_weight_reg_mult'])\n )(prev_layer)", "def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))", "def ch_CNN(layer):\r\n # 
layer=BatchNormalization()(layer)\r\n\r\n layer = Conv2D(16, (2, 2))(layer)\r\n # layer=BatchNormalization()(layer)\r\n layer = ReLU()(layer)\r\n layer = MaxPooling2D(pool_size=(2, 2))(layer)\r\n layer = se_block(16, layer)\r\n layer = Conv2D(32, (3, 3))(layer)\r\n # layer=BatchNormalization()(layer)\r\n layer = ReLU()(layer)\r\n layer = MaxPooling2D(pool_size=(2, 2))(layer)\r\n layer = se_block(32, layer)\r\n layer = Conv2D(64, (3, 3))(layer)\r\n # layer=BatchNormalization()(layer)\r\n layer = ReLU()(layer)\r\n layer = MaxPooling2D(pool_size=(2, 2))(layer)\r\n layer = se_block(64, layer)\r\n\r\n return layer", "def init_wide_conv(self):\n self._layers = [\n tf.keras.layers.Conv2D(input_shape=(MAP_SIZE_x, MAP_SIZE_y, CHANNELS),\n filters=64, kernel_size=KERNEL_SIZE, padding='same', activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=2),\n tf.keras.layers.Conv2D(filters=192, kernel_size=KERNEL_SIZE, padding='same', activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(units=10, activation=tf.nn.relu),\n tf.keras.layers.Dense(units=5)\n ]", "def __init__(self, in_channels, out_channels, bias):\n super(Conv1d1x1, self).__init__(in_channels, out_channels,\n kernel_size=1, padding=0,\n dilation=1, bias=bias)", "def _first_conv(x: tf.Tensor) -> tf.Tensor:\n with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):\n x = ResNet._conv2d_same(x, 64, 7, stride=2, scope='conv1')\n return slim.max_pool2d(x, [3, 3], stride=2, scope='pool1')", "def ggml_conv_1d_ph(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, s: int, d: int) -> ffi.CData:\n ...", "def __init__(self, in_channels, out_channels, kernel_size, stride, padding=11, upsample=None, output_padding=1,\n use_batch_norm=False) -> NoReturn:\n super(Transpose1dLayer, self).__init__()\n self.upsample = upsample\n reflection_pad = nn.ConstantPad1d(kernel_size // 2, value=0)\n conv1d = nn.Conv1d(in_channels, out_channels, kernel_size, stride)\n conv1d.weight.data.normal_(0.0, 0.02)\n Conv1dTrans = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, padding, output_padding)\n batch_norm = nn.BatchNorm1d(out_channels)\n operation_list = [reflection_pad, conv1d] if self.upsample else [Conv1dTrans]\n if use_batch_norm:\n operation_list.append(batch_norm)\n self.transpose_ops = nn.Sequential(*operation_list)", "def conv_lb(prev_layer, num_filters, layer_name, pad=\"same\", batch_norm=True):\n weight_init = RandomNormal(stddev=0.02)\n new_layer = Conv2D(\n num_filters, FILTER, strides=STRIDE, padding=pad, kernel_initializer=weight_init\n )(prev_layer)\n if batch_norm:\n new_layer = BatchNormalization()(new_layer, training=True)\n new_layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA, name=layer_name)(new_layer)\n return new_layer", "def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = 
out_channels\n\n return nn.Sequential(*layers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function that returns the dedicated directory for Post media. This organizes user-uploaded Post content and is used by `ministry.models.Post.attachment` to save uploaded content.

Arguments
=========
def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):
    if instance.ministry:
        _ministry = instance.ministry
    elif instance.campaign:
        _ministry = instance.campaign.ministry
    else:
        e = 'There was an unknown error finding a dir for %s' % instance.title
        raise AttributeError(e)

    return path.join(generic_media_dir(_ministry, prepend=prepend),
                     'post_media', filename)
[ "def upload(self, post):\n # TODO: handle filename conflicts\n directory = \".\".join(self.filename.split(\".\")[:-1])\n\n self.abspath = os.path.join(self.root_dir, directory)\n self.localpath = os.path.join(\"/static/gallery\", directory)\n if not os.path.exists(self.abspath):\n os.makedirs(self.abspath)\n print(\"save picture\", os.path.join(self.abspath, self.filename))\n post.save(os.path.join(self.abspath, self.filename))", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]", "def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):\n for _ in (post_media_dir,):\n _path = path.split(_(instance, \"\", prepend=prepend))[0]\n try:\n mkdir(_path)\n except FileExistsError:\n pass\n except FileNotFoundError:\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _campaign = instance.campaign\n _ministry = _campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.name\n raise AttributeError(e)\n\n # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory\n create_news_post_dir(instance, prepend=prepend)", "def get_server_storage_path():\n return '/'.join([settings.BASE_DIR, 'media'])", "def get_media_dir(self):\n dir_path = _paths.concat(self._gnbase, _DIRNAME_GNMEDIA)\n if not _os.path.isdir(dir_path):\n raise OSError('GEONIS media directory {!r} does not exist'.format(dir_path))\n return dir_path", "def get_media_root():\n if 'test' in sys.argv:\n return get_unit_test_media_root()\n else:\n return django_settings.MEDIA_ROOT", "def get_gallery(self):\n return os.path.join(self.directory, GALLERY_DIR)", "def save(self):\n self.directory = self.directory.lstrip('/')\n media_relative_url = settings.MEDIA_URL.lstrip('/')\n self.directory = self.directory.replace(media_relative_url, '', 1)\n return super(Gallery, self).save()", "def get_base_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.tiles_dir)", "def media_path(self):\n return self._path", "def get_imagen_path(self):\n return f\"media/{self.imagen.name}\"", "def getMediaPath(self, path):", "def _create_folder_structure(self):\n # Copy the public and templates folder\n spg_common.log(\"Copying gallery template files...\")\n copytree(\n Path(pkg_resources.resource_filename(\"simplegallery\", \"data/templates\")),\n self.gallery_path / \"templates\",\n )\n copytree(\n Path(pkg_resources.resource_filename(\"simplegallery\", \"data/public\")),\n self.public_gallery_dir,\n )\n\n photos_dir = self.public_gallery_dir / \"images\" / \"photos\"\n if not photos_dir.exists():\n photos_dir.mkdir(parents=True)\n spg_common.log(f\"Moving all photos and videos to {pltostr(photos_dir)}..\")\n\n for path in self.image_source.iterdir():\n basename_lower = pltostr(path).lower()\n if (\n basename_lower.endswith(\".jpg\")\n or basename_lower.endswith(\".jpeg\")\n or basename_lower.endswith(\".gif\")\n or basename_lower.endswith(\".mp4\")\n or basename_lower.endswith(\".png\")\n ):\n shutil.copy(path, photos_dir / path.name)", "def publish_post(source_file):\n\n new_file = source_file.replace('_posts', 'source/_posts')\n new_file = abspath(new_file)\n\n if not exists(dst_post_dir):\n os.makedirs(dst_post_dir)\n\n content = str()\n with open(source_file, encoding='utf-8') as f:\n for line in f:\n if '(images/' in line or '(images\\\\' in line:\n line = line.replace('(images/', '(' + image_server)\n line = line.replace('(images\\\\', '(' + image_server)\n content += line\n\n with open(new_file, mode='w', encoding='utf-8') as f:\n 
f.writelines(content)", "def media_folder_products(string):\r\n if not string:\r\n string = 'products_images/default.jpg'\r\n\r\n return f'{settings.MEDIA_URL}{string}'", "def get_add_media(self):\r\n return AnkiConnect.request(\r\n \"multi\",\r\n actions=[\r\n AnkiConnect.request(\r\n \"storeMediaFile\",\r\n filename=key,\r\n data=value\r\n )\r\n for key, value in MEDIA.items()\r\n ]\r\n )", "def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''", "def get_media_path(self):\n return Consts.EMPTY_STRING", "def createContent(content, groupId, postData, postFiles, path):\n jsonSet = {}\n if contentData[\"dis\"] is not None:\n jsonSet[\"dis\"] = contentData[\"dis\"]\n if contentData[\"url\"] is not None:\n jsonSet[\"url\"] = contentData[\"url\"]\n elif postFiles.get(contentData[\"tempFileId\"]) is not None:\n contentFile = postFiles.get(contentData[\"tempFileId\"])\n jsonSet[\"url\"] = path.join(\n path,\n \"{0}.{1}\".format(\n {contentData[\"_id\"]},\n contentFile.filename.split(\".\")[-1],\n ),\n )\n contentFile.save(jsonSet[\"url\"])\n else:\n if \"video\" in content:\n createdContent = db.Video.insert(jsonSet)\n elif \"pdf\" in content:\n createdContent = db.Pdf.insert(jsonSet)\n group = db.Group.find_one_or_404({\"_id\": ObjectId(groupId)})\n group[\"contentIds\"].append(createdContent[\"_id\"])\n return (\n jsonify({\"msg\": \"Your files have been added!\"}),\n 200,\n ) # TODO: What do we want to return here?" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function that creates a dedicated directory for Post media.

Arguments
=========
def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):
    for _ in (post_media_dir,):
        _path = path.split(_(instance, "", prepend=prepend))[0]
        try:
            mkdir(_path)
        except FileExistsError:
            pass
        except FileNotFoundError:
            if instance.ministry:
                _ministry = instance.ministry
            elif instance.campaign:
                _campaign = instance.campaign
                _ministry = _campaign.ministry
            else:
                e = 'There was an unknown error finding a dir for %s' % instance.name
                raise AttributeError(e)

            # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory
            create_news_post_dir(instance, prepend=prepend)
[ "def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _ministry = instance.campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.title\n raise AttributeError(e)\n\n return path.join(generic_media_dir(_ministry, prepend=prepend),\n 'post_media', filename)", "def upload(self, post):\n # TODO: handle filename conflicts\n directory = \".\".join(self.filename.split(\".\")[:-1])\n\n self.abspath = os.path.join(self.root_dir, directory)\n self.localpath = os.path.join(\"/static/gallery\", directory)\n if not os.path.exists(self.abspath):\n os.makedirs(self.abspath)\n print(\"save picture\", os.path.join(self.abspath, self.filename))\n post.save(os.path.join(self.abspath, self.filename))", "def create_folder():\r\n my_dir = (str(BLOG2_SCRAPE))\r\n my_dir = my_dir[: my_dir.find('.wordpress')]\r\n my_dir = my_dir.replace('https://', '')\r\n\r\n # If it doesn't exist then create it.\r\n check_folder = os.path.isdir(my_dir)\r\n if not check_folder:\r\n os.makedirs(my_dir)\r\n print('created folder : ' +my_dir)\r\n\r\n # Change dir to new folder.\r\n os.chdir(my_dir)", "def mkdir(self, *args):\n p = self.join(*args)\n error.checked_call(os.mkdir, os.fspath(p))\n return p", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_dirs(conn_id: str, *args, **kwargs) -> None:\r\n\r\n raw = const.get_raw_image_dir(conn_id)\r\n if not os.path.exists(raw):\r\n os.makedirs(raw)\r\n\r\n processed = const.get_processed_image_dir(conn_id)\r\n if not os.path.exists(processed):\r\n os.makedirs(processed)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_snapshots_folder(self):\n snapshots_path = os.path.join(settings.MEDIA_ROOT, self.type, 'snapshots')\n try:\n os.makedirs(os.path.dirname(snapshots_path))\n except OSError:\n # directory exists, pass .\n pass\n except Exception as e:\n logger.exception(e)\n return\n return snapshots_path", "def mkDir(contentDirPath):\n\tif os.path.isdir(contentDirPath):\n\t\tprint \"Directory %s already exists.\" % contentDirPath\n\t\tclearLocalDir(contentDirPath)\n\t\treturn;\n\telse:\n\t\tos.mkdir(contentDirPath)\n\t\tprint \"Created directory %s.\" % contentDirPath", "def create_dir(self, relpath):\n return self.get_dir(relpath).create()", "def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path", "def directory(self) -> Path:\n (directory := Path(\"markdown\").resolve(strict=False)).mkdir(exist_ok=True, parents=True)\n return directory", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def create_directory(self, directory: str):\n pass", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if 
self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def public():\n require('PROJECT_NAME')\n\n media_dir = utils.home('public', env.PROJECT_NAME, 'media')\n static_dir = utils.home('public', env.PROJECT_NAME, 'static')\n\n run('mkdir -p {}'.format(media_dir))\n run('mkdir -p {}'.format(static_dir))", "def __create_img_dir(static_path, image_root_path):\n if not os.path.exists(static_path+image_root_path):\n os.mkdir(static_path+image_root_path)", "def create_media(media):\n return \"\\n\".join(map(lambda m: create_image(m[\"media_url\"], \"small\"), media))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrypts input ciphertext using a symmetric CryptoKey.
def decrypt_symmetric(self, ciphertext):
    from google.cloud import kms_v1

    # Creates an API client for the KMS API.
    client = kms_v1.KeyManagementServiceClient()

    # The resource name of the CryptoKey.
    name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,
                                       self.crypto_key_id)
    # Use the KMS API to decrypt the data.
    response = client.decrypt(name, ciphertext)
    return response.plaintext
[ "def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt_data(self, enc_data: str, enc_symmetric_key):\n # Decrypts the encrypted symmetric key\n #\n unseal_box = SealedBox(self._user_private_key)\n symmetric_key = unseal_box.decrypt(enc_symmetric_key)\n\n # Decrypts the data using the symmetric key\n #\n symmetric_box = nacl.secret.SecretBox(symmetric_key)\n plain_text = symmetric_box.decrypt(enc_data)\n\n return plain_text", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def decrypt_cbc(key, ciphertext):\n\tmessage = ''\n\tfor i in range(0, len(ciphertext)/16 - 1):\n\t\tiv = ciphertext[i*16:(i+1)*16]\n\t\tinputblock = ciphertext[(i+1)*16:(i+2)*16]\n\t\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t\tmessage +=cipher.decrypt(inputblock)\n\tif ord(message[-1]) <=16:\n\t\tmessage = message[:-ord(message[-1])]\n\treturn message", "def decrypt(private_key, ciphertext):\n return private_key.decrypt(\n ciphertext,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def decryptEncryptionKey(cipherString, key):\n\tencryptionType, iv, cipherText, mac = decodeCipherString(cipherString)\n\t# log.debug(\"mac:%s\", mac)\n\t# log.debug(\"iv:%s\", iv)\n\t# log.debug(\"ct:%s\", cipherText)\n\tassert mac is None\n\tif encryptionType != 0:\n\t\traise UnimplementedError(\"can not decrypt type:%s\" % encryptionType)\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tdecryptor = cipher.decryptor()\n\tplainText = decryptor.update(cipherText) + decryptor.finalize()\n\t# log.debug(\"mackey before unpad:%s\", plainText[32:])\n\treturn plainText[:32], plainText[32:64]", "def decrypt(ciphertext):\n return ciphertext", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def aes256_cbc_decrypt(ciphertext, key, iv):\r\n block_cipher = aespython.aes_cipher.AESCipher( key_expander.expand(map(ord, key)) )\r\n stream_cipher = aespython.cbc_mode.CBCMode(block_cipher, 16)\r\n stream_cipher.set_iv(bytearray(iv))\r\n plaintext = bytearray()\r\n for i in xrange(0, len(ciphertext), 16):\r\n plaintext.extend( stream_cipher.decrypt_block(map(ord, ciphertext[i:i+16])) )\r\n padding_len = plaintext[-1]\r\n # check for PKCS7 padding\r\n if not (1 <= padding_len <= 16 and 
plaintext.endswith(chr(padding_len) * padding_len)):\r\n raise ValueError('incorrect password')\r\n return str(plaintext[:-padding_len])", "def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg", "def decrypt(self, key, data):\n # type: (bytes, bytes) -> bytes\n iv, secret = self.parse_key(key)\n cipher = EVP.Cipher('aes_256_cbc', secret, iv, op=0)\n return cipher.update(data) + cipher.final()", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)", "def decrypt_symmetric(filename, password):\n built = read_bytes_in_image(filename)\n key = keygen(password)\n return read_stream(key, built)", "def decrypt(self, secret_key):\n if self.plaintext is not None:\n return\n if not self.ciphertext:\n raise Exception(\"Must define ciphertext before unlocking.\")\n\n # Decrypt ciphertext and remove padding\n iv = self.ciphertext[0:16]\n aes = AES.new(secret_key, AES.MODE_CFB, iv)\n plaintext = self._unpad(aes.decrypt(self.ciphertext[16:]))\n\n # Verify decrypted plaintext against hash\n if not self.validate(plaintext):\n raise ValueError(\"Invalid key or ciphertext!\")\n\n self.plaintext = plaintext", "def decrypt(message, key):\n nonce, secret = message[:8], message[8:]\n cipher = Salsa20.new(key, nonce=nonce)\n return cipher.decrypt(secret)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that decrypts a file using the decrypt_symmetric method and writes the output of this decryption to a file named gcp-key.json
def decrypt_from_file(self, file_path):
    # open and decrypt byte file
    f = open(file_path, "rb").read()
    decrypted = self.decrypt_symmetric(f)
    json_string = decrypted.decode("utf-8")

    # write string to json file
    destination_file_name = Path("downloaded-key/gcp-key.json")
    destination_file_name.touch(exist_ok=True)  # creates file if it does not yet exist
    destination_file_name.touch(exist_ok=True)  # creates file if it does not yet exist
    destination_file_name.write_text(json_string)
[ "def decrypt_symmetric(filename, password):\n built = read_bytes_in_image(filename)\n key = keygen(password)\n return read_stream(key, built)", "def decrypt(directory, keyfile):\n credentials = service_account.Credentials.from_service_account_file(keyfile)\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1', credentials=credentials)\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n PROJECT_ID, LOCATION_ID, KEY_RING_ID, CRYPTO_KEY_ID)\n\n ciphertext_file_name = directory + \"/data_encryption_password.kms\"\n\n if not os.path.exists(ciphertext_file_name):\n print(\"Error: File not found {}\".format(ciphertext_file_name))\n sys.exit()\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n password = plaintext\n\n for root, dirs, files in os.walk(directory):\n for file in files:\n if file.endswith(\".enc\"):\n source_file_name = os.path.join(root, file)\n encryptor_decryptor.decrypt_file(password, source_file_name)\n os.remove(source_file_name)\n\n os.remove(ciphertext_file_name)", "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def decrypt(self, filepath):\n\n # filename and paths to tmp directory\n filename = os.path.basename(filepath)\n decrypted_filepath = self.tmp_location + '/' + filename\n\n # read the ciphertext, decrypt it, write it to the tmp location, then\n # overwrite the encrypted file with the decrypted file, finally remove\n # the encrypted file from the tmp location\n try:\n with open(filepath, 'rb') as f:\n ciphertext = f.read()\n cleartext = self.env_aead.decrypt(ciphertext, b'')\n with open(decrypted_filepath, 'wb') as f:\n f.write(cleartext)\n shutil.copyfile(decrypted_filepath, filepath)\n os.unlink(decrypted_filepath)\n except TinkError as decryption_error:\n error_and_exit(str(decryption_error))\n\n return decrypted_filepath", "def decrypt_file(input_path, output_path, priv_key_path='private.pem', pwd=None):\n # Import private key\n with open(priv_key_path, 'rb') as priv_fd:\n rsa = RSA.importKey(priv_fd.read(), passphrase=pwd)\n\n # Read the encrypted file\n with open(input_path, 'rb') as in_fd:\n key_size = int(in_fd.readline())\n padding_len = int(in_fd.readline())\n encrypted_key = in_fd.read(key_size)\n iv = in_fd.read(16)\n # decrypt symmetric encryption key with the private key\n secret_key = rsa.decrypt(encrypted_key)\n enc_data = in_fd.read()\n\n aes = AES.new(secret_key, AES.MODE_CBC, iv)\n # decrypt data\n data = aes.decrypt(enc_data)\n\n # remove padding\n data = data[:-padding_len]\n #data = data\n #data = data.decode('utf8').replace('\\x00', '').encode()\n\n with open(output_path, 'wb') as out_fd:\n out_fd.write(data)", "def 
decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))", "def save_key_der(self, file):\n bio = BIO.openfile(file, 'wb')\n return self.save_key_der_bio(bio)", "def decrypt(filename, key):\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # write the original file\n with open(filename, \"wb\") as file:\n file.write(decrypted_data)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def _decrypt(self, src_filepath, dest_filepath):\r\n self.log.info(\"Decrypting file {0} to {1}.\".format(src_filepath, dest_filepath))\r\n\r\n gpg = gnupg.GPG(options=self.gpg_options)\r\n key_data = open(self.key_file, mode='rb').read()\r\n import_result = gpg.import_keys(key_data)\r\n self.log.info(\"Key import results: {0}\".format(import_result.results))\r\n\r\n with open(src_filepath, 'rb') as f:\r\n status = gpg.decrypt_file(f,\r\n passphrase=self._passphrase,\r\n output=dest_filepath)\r\n self.log.info(\"ok: {0}, status:{1}, stderr: {2}\".format(status.ok, status.status, status.stderr))\r\n\r\n if status.ok and self.remove_encrypted:\r\n os.remove(src_filepath)\r\n\r\n if not status.ok:\r\n raise AirflowException(\"Failed to decrypt file {0}: {1}\"\r\n .format(src_filepath, status.stderr))\r\n\r\n self.log.info(\"Completed file decryption.\")", "def decrypt_file(self, file_name, key):\n with open(file_name, 'rb') as fo:\n try:\n ciphertext = fo.read()\n except:\n print \"[-] Error opening file {0} for reading.\".format(file_name)\n return\n try:\n dec = self.decrypt(ciphertext, key)\n except:\n print \"[-] Decryption failed.\"\n return\n\n with open(file_name[:-4], 'wb') as fo:\n try:\n fo.write(dec)\n except:\n print \"[-] Error writing out file {0}\".format(file_name[:-4])\n return\n\n os.chmod(file_name[:-4], 0600)\n return file_name[:-4]", "def decrypt(password, credfile = \"credentials_encrypted.txt\"):\n\tif not os.path.exists(credfile):\n\t\tprint('The credential file %s does not exist. Quitting...' 
% (credfile))\n\t\tsys.exit()\n\n\t#open file\n\tfileObj = open(credfile)\n\tcontent = fileObj.read()\n\tfileObj.close()\n\n\t#decrypt file\n\tdecrypted = transpositionEncrypt.decryptedMessage(password, content)\n\n\t#output to file\n\toutputFileObj = open('credentials.txt', 'w')\n\toutputFileObj.write(translated)\n\toutputFileObj.close()\n\n\t#handle credentials and create the exchange object\n\t###HANDLE FILE\n\n\t#remove the unencrypted file\n\tos.remove(\"credentials.txt\")\n\n\treturn Exchanges", "def decrypt_file(filename, key):\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, False)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Decrypted: \" + new_filename)\n file.write(decrypted_data)\n\n return new_filename", "def decrypt_file(file, key):\n if not file.endswith('.enc'):\n raise ValueError(\"%s does not end with .enc\" % file)\n\n fer = Fernet(key)\n\n with open(file, 'rb') as f:\n decrypted_file = fer.decrypt(f.read())\n\n with open(file[:-4], 'wb') as f:\n f.write(decrypted_file)\n\n os.chmod(file[:-4], 0o600)", "def decrypt(infile, metadatafile, outfile, sslnoverify=False):\n metadata = {}\n # Load the metadata\n with open(metadatafile, \"r\") as mdf:\n try:\n metadata = json.load(mdf)\n except:\n print \"Error loading metadata!\"\n return\n salt = base64.b64decode(metadata['salt'])\n dectime = metadata['dectime']\n # Try to get private keys and decrypt key pieces\n pieces = []\n while len(pieces) < metadata['n']:\n try:\n server, blob = metadata['locks'].pop()\n except:\n print \"Unable to gather enough keys to decrypt!\"\n return\n server = tuple(server)\n privkey = get_privkey(server, dectime, salt, sslnoverify=sslnoverify)\n if not privkey:\n print \"Error getting private key from \" + server\n continue\n pieces.append(seccure.decrypt(base64.b64decode(blob), privkey, curve=mycurve))\n\n # Now try to recover the key\n if metadata['n'] == 1:\n symkey = pieces[0]\n else:\n symkey = join_key(pieces)\n if not symkey:\n print \"Unable to recover key!\"\n return\n print \"Decrypting file...\"\n # Do the decryption\n decrypt_file(symkey, infile, outfile)", "def decrypt_file(self, input_file_name='', output_file_name=''):\n\n # Checking if input and output files selected right\n assert input_file_name and isfile(input_file_name), \"Input file wasn't selected!\"\n assert output_file_name, \"Output file wasn't selected!\"\n\n with open(output_file_name, 'wb') as output_file:\n # To iterate file as int values, I'm using generator\n input_file = self._open_file_longint(input_file_name)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n raise AssertionError(\"Input file is empty! 
Nothing to decrypt.\")\n\n x = self.keys['private']\n p = self.keys['public']['p']\n\n while alpha and beta:\n message_byte = bytes(chr((beta % p * (pow(alpha, (p - 1 - x), p))) % p), \"ascii\")\n output_file.write(message_byte)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n alpha = 0\n beta = 0\n return 1", "def decrypt(credfile, passphrase=None):\n gpg_cmd = ['gpg']\n\n if passphrase:\n gpg_cmd = GPGCommunicator._add_passphrase(gpg_cmd, passphrase)\n\n gpg_cmd += ['-d', credfile]\n\n (stdout, stderr, retcode) = Shell.pipe(gpg_cmd)\n\n if retcode == 0:\n return b64decode(stdout)\n else:\n err = \"Keyfile couldn't be decoded (return code: %d).\" % retcode\n raise GPGCommunicator.KeyfileDecodeError(err)\n\n return \"\"", "def do_pgp_sym_decrypt_and_encrypt_test(self, fips_mode = False):\n sql_file = local_path('query08.sql')\n out_file = (local_path('query08.out') if not fips_mode else local_path('query08_fips.out'))\n ans_file = (local_path('query08.ans') if not fips_mode else local_path('query08_fips.ans'))\n self.doPGCRYPT(sql_file, out_file, ans_file)", "def decrypt_file(self, key):\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n ctr = Crypto.Util.Counter.new(\n 128, initial_value=(\n (iv[0] << 32) + iv[1]) << 64)\n cipher = Crypto.Cipher.AES.new(\n MegaCrypto.a32_to_str(k),\n Crypto.Cipher.AES.MODE_CTR,\n counter=ctr)\n\n self.pyfile.setStatus(\"decrypting\")\n self.pyfile.setProgress(0)\n\n file_crypted = encode(self.last_download)\n file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]\n\n try:\n f = open(file_crypted, \"rb\")\n df = open(file_decrypted, \"wb\")\n\n except IOError, e:\n self.fail(e.message)\n\n encrypted_size = os.path.getsize(file_crypted)\n\n checksum_activated = self.config.get(\n \"activated\", default=False, plugin=\"Checksum\")\n check_checksum = self.config.get(\n \"check_checksum\", default=True, plugin=\"Checksum\")\n\n cbc_mac = MegaCrypto.Checksum(\n key) if checksum_activated and check_checksum else None\n\n progress = 0\n for chunk_start, chunk_size in MegaCrypto.get_chunks(encrypted_size):\n buf = f.read(chunk_size)\n if not buf:\n break\n\n chunk = cipher.decrypt(buf)\n df.write(chunk)\n\n progress += chunk_size\n self.pyfile.setProgress(int((100.0 / encrypted_size) * progress))\n\n if checksum_activated and check_checksum:\n cbc_mac.update(chunk)\n\n self.pyfile.setProgress(100)\n\n f.close()\n df.close()\n\n self.log_info(_(\"File decrypted\"))\n os.remove(file_crypted)\n\n if checksum_activated and check_checksum:\n file_mac = cbc_mac.digest()\n if file_mac == meta_mac:\n self.log_info(_('File integrity of \"%s\" verified by CBC-MAC checksum (%s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], meta_mac))\n else:\n self.log_warning(_('CBC-MAC checksum for file \"%s\" does not match (%s != %s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], file_mac, meta_mac))\n self.checksum_failed(\n file_decrypted, _(\"Checksums do not match\"))\n\n self.last_download = decode(file_decrypted)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads key for configured service account and stores it in the folder generatedkey/
def download_key_from_blob(self):
    source_blob_name = "generated-keys/{}".format(self.service_account_email)
    destination_name = self.service_account_email

    # generate destination folder and file if they do not yet exist
    Path("downloaded-key/").mkdir(parents=True, exist_ok=True)  # creates folder if not exists
    folder = Path("downloaded-key/")  # folder where all the newly generated keys go
    destination_file_name = folder / "{}".format(destination_name)  # file named after service-account name
    destination_file_name.touch(exist_ok=True)

    # download the file and store it locally
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(self.bucket_name)
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)

    # prints source and destination indicating successful download
    print('Encrypted key downloaded to -----> \n {}.'.format(
        source_blob_name, destination_file_name))

    return destination_file_name
[ "def download_default_key_pair():\n pass", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def downloadauthkey():\n payload = _generate_auth_token()\n\n iso = pycdlib.PyCdlib()\n iso.new(joliet=3)\n iso.add_fp(BytesIO(payload), len(payload), '/JWT.;1', joliet_path=\"/jwt\")\n buf = BytesIO()\n iso.write_fp(buf)\n iso.close()\n buf.seek(0)\n return send_file(buf, as_attachment=True, attachment_filename=\"%s-key.iso\" % \\\n session['iyo_user_info']['username'])", "def request_new_account_key(self) -> dict:\n return self._put(\"/spPortal/account/key/generate\")", "def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def generate_json_key_file():\n load_dotenv()\n path = pathlib.Path(__file__).parent.absolute()\n rsa_key = os.getenv(\"GOOGLE_PRIVATE_KEY\").replace(\"\\\\n\", \"\\n\")\n json_key = {\"type\": \"service_account\",\n \"project_id\": os.getenv(\"GOOGLE_PROJECT_ID\"),\n \"private_key_id\": os.getenv(\"GOOGLE_PRIVATE_KEY_ID\"),\n \"private_key\": rsa_key,\n \"client_email\": os.getenv(\"GOOGLE_CLIENT_EMAIL\"),\n \"client_id\": os.getenv(\"GOOGLE_CLIENT_ID\"),\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://oauth2.googleapis.com/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": os.getenv(\"GOOGLE_CLIENT_X509_CERT_URL\")\n }\n with open(f\"{path}/google_key.json\", \"w\", encoding=\"utf-8\") as json_file:\n json.dump(json_key, json_file, ensure_ascii=False, indent=4)", "def private_key_file(self):", "def download_key(email, domain):\n email_dir = '{}/{}'.format(ACCOUNT_STORE_FOLDER, email)\n domain_key_file = '{}/{}.key'.format(email_dir, domain)\n output_path = '{}/{}'.format(email_dir, domain)\n print(output_path)\n account_path = '{}/{}.json'.format(email_dir, email)\n account = load_account(account_path)\n print(account)\n print(domain_key_file)\n\n bak_path = None\n if os.path.isdir(output_path):\n if len(os.listdir(output_path)) != 0:\n bak_path = shutil.move(output_path, '{}.bak'.format(output_path))\n os.mkdir(output_path)\n else:\n pass\n\n elif not os.path.isdir(output_path):\n os.mkdir(output_path)\n\n try:\n issue(LETS_ENCRYPT_PRODUCTION, account, [domain], DEFAULT_CERT_KEY_SIZE, domain_key_file , None, output_path)\n if bak_path:\n shutil.rmtree(bak_path)\n except:\n shutil.rmtree(output_path)\n if bak_path:\n shutil.move(bak_path, '{}'.format(output_path))\n print('issue failed, limit rate for this domain or some other reason')\n return False\n return True", "def generate_key():\n key = Fernet.generate_key()\n with open(\"key.key\", \"wb\") as key_file:\n key_file.write(key)", "def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile", "def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)", "def create_key(service_account_email):\n\n credentials = service_account.Credentials.from_service_account_file(\n filename=os.environ['GOOGLE_APPLICATION_CREDENTIALS'],\n 
scopes=['https://www.googleapis.com/auth/cloud-platform'])\n\n service = googleapiclient.discovery.build(\n 'iam', 'v1', credentials=credentials)\n\n key = service.projects().serviceAccounts().keys().create(\n name='projects/-/serviceAccounts/' + service_account_email, body={}\n ).execute()\n\n print('Created key: ' + key['name'])", "def api_keypath():\n return Path(\"~/.ncbi/api_key\")", "def GeneratePrivateKey(options):\n StepBanner('GEN PRIVATE KEY', 'Generating fresh private key')\n tempdir = tempfile.mkdtemp(dir=PnaclDirs.OutputDir())\n ext_dir = J(tempdir, 'dummy_extension')\n os.mkdir(ext_dir)\n PnaclPackaging.GenerateManifests(ext_dir,\n '0.0.0.0',\n 'dummy_arch',\n [],\n False)\n CRXGen.RunCRXGen(options.chrome_path, ext_dir)\n shutil.copy2(J(tempdir, 'dummy_extension.pem'),\n PnaclDirs.OutputDir())\n shutil.rmtree(tempdir)\n print ('\\n<<< Fresh key is now in %s/dummy_extension.pem >>>\\n' %\n PnaclDirs.OutputDir())", "def generate_keys(key_file):\n sk = SecretKey()\n sk.save(key_file)", "def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile", "def create_key ():", "def private_key(self):", "def load_key():\r\n return open(\"secret.key\", \"rb\").read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate overlap among trajectories
def trajectory_overlap(gt_trajs, pred_traj):
    max_overlap = 0
    max_index = 0
    for t, gt_traj in enumerate(gt_trajs):
        s_viou = viou_sx(gt_traj['sub_traj'], gt_traj['duration'],
                         pred_traj['sub_traj'], pred_traj['duration'])
        o_viou = viou_sx(gt_traj['obj_traj'], gt_traj['duration'],
                         pred_traj['obj_traj'], pred_traj['duration'])
        so_viou = min(s_viou, o_viou)
        if so_viou > max_overlap:
            max_overlap = so_viou
            max_index = t

    return max_overlap, max_index
[ "def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)", "def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n ol = max(0, min(x1, x3) - max(x0, x2)) * max(0, min(y1, y3) - max(y0, y2))\n\n return ol / float(2*(size2[0]*size2[1]) - ol)", "def _newovertest():\n testranges = [Range(450,464), Range(435,465), Range(0,800)]\n results = [4, 10, 34-12 + 440-37 + 800-460]\n qo = QueryObject(\"foo\", [Range(12,34), Range(37,440), Range(460,800)])\n for i,r in enumerate(testranges):\n retlist = qo.newoverlap(r)\n for ret in retlist:\n print(ret[0], ret[1])", "def onGetOverlap(self, evt):\n\t\t#GET THE ARRAYS\n\t\ttargets1 = self.panel1.getTargets('Picked')\n\t\ttargets2 = self.panel2.getTargets('Picked')\n\t\ta1 = self.targetsToArray(targets1)\n\t\ta2 = self.targetsToArray(targets2)\n\n\t\t#GET THE POINT VALUES\n\t\tapTiltTransform.setPointsFromArrays(a1, a2, self.data)\n\n\t\t#CHECK IF WE HAVE POINTS\n\t\tif len(a1) == 0 or len(a2) == 0:\n\t\t\tself.statbar.PushStatusText(\"ERROR: Cannot get overlap. Not enough picks\", 0)\n\t\t\tdialog = wx.MessageDialog(self.frame, \"Cannot get overlap.\\nThere are no picks.\",\\\n\t\t\t\t'Error', wx.OK|wx.ICON_ERROR)\n\t\t\tdialog.ShowModal()\n\t\t\tdialog.Destroy()\n\t\t\treturn False\n\n\t\tif self.data['theta'] == 0.0 and self.data['thetarun'] is False:\n\t\t\tself.statbar.PushStatusText(\"ERROR: Cannot get overlap. No alignment parameters.\", 0)\n\t\t\tdialog = wx.MessageDialog(self.frame, \"Cannot get overlap.\\nNo alignment parameters.\",\\\n\t\t\t\t'Error', wx.OK|wx.ICON_ERROR)\n\t\t\tdialog.ShowModal()\n\t\t\tdialog.Destroy()\n\t\t\treturn False\n\n\t\t#GET IMAGES\n\t\timage1 = numpy.asarray(self.panel1.imagedata, dtype=numpy.float32)\n\t\timage2 = numpy.asarray(self.panel2.imagedata, dtype=numpy.float32)\n\n\t\t#GET THE VALUE\n\t\tbestOverlap, tiltOverlap = apTiltTransform.getOverlapPercent(image1, image2, self.data)\n\t\toverlapStr = str(round(100*bestOverlap,2))+\"% and \"+str(round(100*tiltOverlap,2))+\"%\"\n\t\tself.statbar.PushStatusText(\"Overlap percentage of \"+overlapStr, 0)\n\t\tself.data['overlap'] = round(bestOverlap,5)", "def _overlaps_with_other_teams_penalty(self):\n r = 0\n combs = combinations(range(self.n), 2)\n for comb in combs:\n r -= self.schedules[comb[0]].overlaps_by(self.schedules[comb[1]])\n return r", "def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = 
ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs", "def _test2(toprint=False):\n testranges = [Range(450,464), Range(435,465), Range(0,800)]\n results = [4, 10, 34-12 + 440-37 + 800-460]\n qo = QueryObject(\"foo\", [Range(12,34), Range(37,440), Range(460,800)])\n for i,r in enumerate(testranges):\n overlap, retranges = qo.overlap(r,True)\n print(overlap, [str(r) for r in retranges])", "def getOverlap(self):\n return 0.5", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! 
:', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list", "def overlap(t1, t2):\n t1 = dict(min=np.min(t1), max=np.max(t1))\n t2 = dict(min=np.min(t2), max=np.max(t2))\n for t in (t1, t2):\n t['dur'] = t['max'] - t['min']\n\n # Ensure t1 min < t2 min\n if t2['min'] < t1['min']:\n print('t2 starts earlier')\n t1, t2 = t2, t1\n \n # var names wrt t2\n min_inside = t2['min'] >= t1['min'] and t2['min'] <= t1['max']\n max_inside = t2['max'] <= t1['max']\n if min_inside and max_inside:\n # t2 completely contained by t1\n return (t2['min'], t2['max'])\n elif min_inside:\n # t2 partially contained by t1\n return (t2['min'], t1['max'])\n else:\n # no overlap\n return (None, None)", "def calculate_overlaps(drives, dist_tol, time_tol):\n \n for i1 in range(len(drives)-1):\n d1 = drives[i1]\n \n for i2 in range(i1+1, len(drives)):\n d2 = drives[i2]\n \n #stop trying if d1 ends more than time_tol before d2 starts\n #note that drives are chronologically ordered\n if d2.coords[0].time - d1.coords[-1].time > time_tol:\n break\n \n overlap = ol.compute_overlap(d1, d2, dist_tol, time_tol)\n if overlap:\n ol1 = ol.Overlap(d1, d2, overlap[0], overlap[1])\n d1.append_overlap(ol1)\n ol2 = ol.Overlap(d2, d1, overlap[2], overlap[3])\n d2.append_overlap(ol2)", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n if boxes1.shape[1] == 4:\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i] #this is the gt box\n overlaps[:, i] = compute_iou_2D(box2, boxes1, area2[i], area1)\n return overlaps\n\n else:\n # Areas of anchors and GT boxes\n volume1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) * (boxes1[:, 5] - boxes1[:, 4])\n volume2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) * (boxes2[:, 5] - boxes2[:, 4])\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(boxes2.shape[0]):\n box2 = boxes2[i] # this is the gt box\n overlaps[:, i] = compute_iou_3D(box2, boxes1, volume2[i], volume1)\n return overlaps", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # 
(Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def getCollisionInfo(r1,r1vel,r2,r2vel):\n \n \n time = None\n isStopping = None\n \n xRange = Rect.getAxisProjCollisionRange(r1.getXProj(),\n r1vel[0],\n r2.getXProj(),\n r2vel[0])\n \n yRange = Rect.getAxisProjCollisionRange(r1.getYProj(),\n r1vel[1],\n r2.getYProj(),\n r2vel[1]) \n \n if xRange == 'OVERLAPS' and yRange == 'OVERLAPS':\n #print 'overlaps!!!!!'\n time = 0.0 #objects already overlap \n elif xRange == 'NEVER_OVERLAPS' or yRange == 'NEVER_OVERLAPS':\n time = 'NEVER_OVERLAPS'\n elif xRange == 'OVERLAPS':\n #if times have different signs, we are inside\n if yRange[0]*yRange[1] < 0:\n time = 0.0\n else:\n time = min(yRange)\n elif yRange == 'OVERLAPS':\n #if times have different signs, we are inside\n if xRange[0]*xRange[1] < 0:\n time = 0.0\n else:\n time = min(xRange)\n else: #xRange and yRange both have valid intervals\n #TODO: FINISH THIS \n overlap = getOverlap(xRange,yRange)\n \n# print 'DEBUG!'\n# print 'overlap', overlap\n \n if overlap is None:\n return None\n else:\n #TODO: WRONG!!!\n #time = overlap\n time = overlap[0] \n \n if xRange[0]*xRange[1] < 0 and yRange[0]*yRange[1] < 0:\n #we are inside\n time = 0.0\n \n if time == 'NEVER_OVERLAPS':\n return None\n \n #TODO: should we allow negatives times and deal with later\n if time < 0: \n return None\n \n \n #TODO: calc if each object will hae it's movement stopped\n \n #get positions at time\n ax = r1.getXProjAtTime(time,r1vel)\n bx = r2.getXProjAtTime(time,r2vel)\n \n ay = r1.getYProjAtTime(time,r1vel)\n by = r2.getYProjAtTime(time,r2vel)\n \n r1Stops = Rect.collisionStopsMovement(ax,ay,r1vel,bx,by)\n r2Stops = Rect.collisionStopsMovement(bx,by,r2vel,ax,ay)\n \n return (time,r1Stops,r2Stops)", "def overlap(self):\n transformation_matrix = self.molecular_orbital_matrix()\n return numpy.dot(numpy.linalg.inv(transformation_matrix),numpy.linalg.inv(numpy.transpose(transformation_matrix)))", "def get_overlap(self, transposon):\n return max(0, min(self.last-transposon.first,\n transposon.last-self.first,\n len(self), len(transposon)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes a single string to a list of strings.
def decode(self, s):
    if s == "null":
        return []
    return s.split(chr(257))
[ "def decode(self, s):\n lststr = s.split(',')\n if s=='': return []\n rst = []\n for i in range(len(lststr)):\n rst.append(lststr[i])\n return rst", "def decode(self, s: str) -> [str]:", "def convert2list(string):\n if isinstance(string, list):\n return string\n else:\n return [string]", "def convert(string: str) -> List[str]:\n li = list(string.split(\" \"))\n return li", "def _decode_list(data: BencodedString) -> list:\n result_list = []\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n result_list.append(_decode(data))\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a list, reached end of the bencoded string \"\n \"before the end marker was found. Most likely the bencoded \"\n \"string is incomplete or incorrect.\"\n )\n\n return result_list", "def string_to_list(string):\n return string.split(\"; \")", "def string_to_char_list(my_string):\n return list(my_string)", "def string_to_list(self, S):\n # sanity check\n if not isinstance(S, str):\n raise TypeError(\"input S must be a non-empty string of bits\")\n if len(S) == 0:\n raise ValueError(\"input S must be a non-empty string of bits\")\n\n # perform the conversion from string to list\n bin = BinaryStrings()\n return [bin(s) for s in S]", "def validate_string_list(value):\r\n try:\r\n if sys.version_info.major < 3:\r\n # pylint: disable-msg=W0404\r\n from locale import getpreferredencoding\r\n encoding = getpreferredencoding()\r\n value = value.decode(encoding)\r\n return [x.strip() for x in value.split(u\",\")]\r\n except (AttributeError, TypeError, UnicodeError):\r\n raise ValueError(\"Bad string list\")", "def _strings_to_list(one_or_more_strings):\n if isinstance(one_or_more_strings, str):\n return [one_or_more_strings]\n else:\n return list(one_or_more_strings)", "def stringToIntList(string):\n return AdvancedMapMap().mapData(lambda x: ord(x), string).getResults()", "def str2list(input):\n if isinstance(input, str):\n return [input]\n\n else:\n return input", "def make_list_from_str(self, s, type=None):\n ret = []\n for item in s.split():\n if not item:\n continue\n if type is not None:\n try:\n item = item.strip('[](),')\n item = type(item)\n except ValueError as e:\n msg = \"Cannot convert to a list because we could not convert {} to {}. Original error message: {}\".format(item, type, e)\n raise ConfigError(msg)\n ret.append(item)\n return ret", "def parse_as_list(string):\n try:\n return ast.literal_eval(string)\n except ValueError:\n raise SelmaParseException(\n \"Can't parse '%s' as list\" % string)\n except:\n raise SelmaParseException(\n \"Unknown exception when parsing list from string '%s'\" % string)", "def str2list(string):\n return [string[i:i + 2] for i in range(0, len(string), 2)]", "def string_list(s):\n\n if not isinstance(s, str):\n raise ValueError(f\"Not a string: {s!r}\")\n return [p for p in [part.strip() for part in s.split(\",\")] if p]", "def test_string_to_list_string(self):\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )", "def test_string_to_list_string(self):\n\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )", "def explode(_string):\n if not _string or not isinstance(_string, str):\n return _string\n else:\n return list(_string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sort list of objects randomly then update everything in this world
def update(self, dt):
    random.shuffle(self.gameObjects)
    for item in self.gameObjects:
        description = item.update(dt)
[ "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def sort(self):\n self.red_bucket = (140, 0)\n self.green_bucket = (230, 0)\n self.blue_bucket = (-230, 0)\n self.rest();\n while len(self.vision_objects) > 0:\n obj = sorted(self.vision_objects, key=lambda x: x.cx)[0]\n x, y = getattr(self, '%s_bucket' % obj.color)\n self.pick(200-obj.cx, obj.cy + 50, 0, -90, vertical_buffer_height=115)\n self.place(x, y, 140, -90)\n time.sleep(1.0);\n self.rest()", "def randomize(self):\r\n\r\n pass", "def pre_randomize(self):\n pass", "def _sort(self):\r\n self.__dict__['_z_ordered_sprites'] = sorted(self.sprites, key=lambda sprite:sprite.z_order)", "def sort_list(self):\n\n previous_item_index = False\n previous_item_value = None\n\n for item_index, item_value in enumerate(self.list):\n\n if previous_item_index != False and item_value < previous_item_value:\n self.display_list_item_shuffling_message(item_value, previous_item_value)\n self.list.insert(previous_item_index, self.list.pop(item_index))\n self.list_items_shuffled_count += 1 \n\n previous_item_index = item_index\n previous_item_value = item_value", "def update(self):\n for sort in sorted(self.systems):\n system = self.systems[sort]\n self._update_system(system)", "def randomize_herbs(self):\n random.shuffle(self.herbivores)", "def place_objects(self):\n placed_objects = []\n index = 0\n np.random.seed(300)\n # place objects by rejection sampling\n for _, obj_mjcf in self.mujoco_objects.items():\n horizontal_radius = obj_mjcf.get_horizontal_radius()\n bottom_offset = obj_mjcf.get_bottom_offset()\n success = False\n for _ in range(5000): # 5000 retries\n bin_x_half = self.bin_size[0] / 2 - horizontal_radius - 0.05\n bin_y_half = self.bin_size[1] / 2 - horizontal_radius - 0.05\n object_x = np.random.uniform(high=bin_x_half, low=-bin_x_half)\n object_y = np.random.uniform(high=bin_y_half, low=-bin_y_half)\n\n # make sure objects do not overlap\n object_xy = np.array([object_x, object_y, 0])\n pos = self.bin_offset - bottom_offset + object_xy\n location_valid = True\n for pos2, r in placed_objects:\n dist = np.linalg.norm(pos[:2] - pos2[:2], np.inf)\n if dist <= r + horizontal_radius:\n location_valid = False\n break\n\n # place the object\n if location_valid:\n # add object to the position\n placed_objects.append((pos, horizontal_radius))\n self.objects[index].set(\"pos\", array_to_string(pos))\n # random z-rotation\n quat = self.sample_quat()\n self.objects[index].set(\"quat\", array_to_string(quat))\n success = True\n print('object {} in pick place task: pos:{}, quat:{}'.format(index, pos, quat))\n break\n\n # raise error if all objects cannot be placed after maximum retries\n if not success:\n raise RandomizationError(\"Cannot place all objects in the bins\")\n index += 1", "def post_randomize(self):\n pass", "def test_shuffle(self):\n random.shuffle(self.liste)\n self.liste.sort()\n self.assertEqual(self.liste, list(range(10)))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def randomize():\n random.shuffle(chores)\n random.shuffle(names)", "def update(self, speed):\n\n self.entity_list = sorted(self.entity_list, key=lambda Entity: -Entity.distance)\n\n garbage_collection = []\n for entity in self.entity_list:\n entity.update(speed)\n if entity.pos.x + (0.5 * entity.img.get_width()) < 0:\n garbage_collection.append(entity)\n\n for entity in garbage_collection:\n self.entity_list.remove(entity)\n del entity\n\n 
self.populate(self.screen_size)", "def _shuffle(self):\n self._ziehung = random.sample(range(1,50), 6)", "def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def sortPool(self):\n\t\tif not self.sorted:\n\t\t\tself.schedules.sort(key=lambda schedule: schedule.fitness, reverse=True)\n\t\t\tself.sorted = True", "def shuffle(self):\n self.logger.debug('Shuffling wallpaper queue')\n\n random.shuffle(self.wallpapers)\n self.index = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add this to the world
def add_to_world(self, thing):
    thing.set_world_info(self.current_id, self)
    self.gameObjects.append(thing)
    self.current_id += 1
[ "def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world", "def create_world(self, parent):\n raise NotImplementedError", "def world(self, value):\n self.worlds[self.world_index] = value", "def run_world(self):\n self.world_alive = True\n self.world_setable = False", "def apply_to_world(self, world):\n # add the current obstacles\n for obstacle in self.current_obstacles:\n world.add_obstacle(obstacle)\n\n # program the robot supervisors\n for robot in world.robots:\n robot.supervisor.goal = self.current_goal[:]", "def add_entity(self, entity):\r\n self.entities[entity.id] = entity\r\n \r\n # Update state.\r\n entity.flags.add(\"in world\")", "async def async_added_to_opp(self):\n self.opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "def setWorld(self, state):\n\t\tself.world = state", "def getWorld(self):\n\t\treturn self.world", "def add(self, thought):\n pass", "def getWorld(self):\n return self.world", "def build_world(self, width, height, entrance, agent, objects):\n env = WumpusEnvironment(width, height, entrance)\n if self.trace:\n agent = wumpus_environment.TraceAgent(agent)\n agent.register_environment(env)\n env.add_thing(agent, env.entrance)\n for (obj, loc) in objects:\n env.add_thing(obj, loc)\n print env.to_string()\n print self.objects \n return env", "def add_to_initiative(self):\n\t\treturn", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def setCamera(self, camera):\r\n self.camera = camera\r\n camera.addWorld(self)", "def __init__(self, index):\n self.index = index\n self.world = None # world is supplied slightly later by the main game", "def add_gameobject(self, game_object):\n game_object.world = self\n if game_object.type == \"player\":\n if self.player is None:\n self.player = game_object\n else:\n game_object.world = None\n raise ValueError(\"World already has a player.\")\n elif game_object.type == \"tile\":\n self.tiles.append(game_object)\n pos = game_object.pos.astype(np.int32)\n self.tiles_fast_access[pos[0] // TILE_SIZE[0]][pos[1] // TILE_SIZE[1]] = game_object\n if self.surface is not None:\n self.surface.blit(game_object.image, pos)\n elif game_object.type == \"background\":\n self.background_objects.append(game_object)\n else:\n self.game_objects.append(game_object)", "def add_body(self, planet):\n pass", "def add_to_player_list(self, player):\n player.monster_list.add(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from an apdex metric object.
def merge_apdex_metric(self, metric):
    self[0] += metric.satisfying
    self[1] += metric.tolerating
    self[2] += metric.frustrating
    self[3] = ((self[0] or self[1] or self[2]) and
               min(self[3], metric.apdex_t) or metric.apdex_t)
    self[4] = max(self[4], metric.apdex_t)
[ "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def populate_metric_values(self):\n self.new_counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]] = get_counter_metrics(\n self.counter_metric_specs, \n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n \n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_counter_metrics(self.new_counter_metrics[detailed_version.id])\n\n self.aggregated_counter_metrics = self.get_aggregated_counter_metrics()\n\n self.new_ratio_metrics: Dict[iter8id, Dict[iter8id, RatioDataPoint]] = get_ratio_metrics(\n self.ratio_metric_specs, \n self.counter_metric_specs, \n self.aggregated_counter_metrics,\n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n\n # This is in the shape of a Dict[str, RatioMaxMin], where the keys are ratio metric ids\n # and values are their max mins. \n\n self.ratio_max_mins = self.get_ratio_max_mins()\n\n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_ratio_metrics(\n self.new_ratio_metrics[detailed_version.id]\n )", "def _collate_metric_values(self):\n collated_measurement_values = {}\n for info in self.__tags_to_metric.values():\n self._collate_metric_info(info, collated_measurement_values)\n return collated_measurement_values", "def __add__(self, other_metric_set):\n new_metric_set = self\n for name, metric in other_metric_set.items():\n new_metric_set[name] = metric\n return new_metric_set", "def append_aggregated_data(resultdata):\n resultdata['AggregateData'] = dict(resultdata['metrics'])\n for key, entryTimesAndValues in resultdata['AggregateData'].items():\n metricvals = [item[1] for item in entryTimesAndValues]\n resultdata['AggregateData'][key] = {\n 'mean': dbmath.mean(metricvals),\n 'median': dbmath.median(metricvals),\n 'stddev': dbmath.standard_deviation(metricvals),\n 'min': min(metricvals),\n 'max': max(metricvals)\n }\n return resultdata", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def build_metric_obj(self, metric, current_log):\n\n relevant_data = self.get_relevant_lines(metric, current_log)\n metric, important_line = self.refine_relevant_lines(metric,\n relevant_data)\n\n user_line, metric = self.extract_userline(important_line, metric)\n\n metric['user'], metric['user_id'] = self.resolve_user(user_line,\n metric['thread'])\n metric['thread'] = self.sort_threads(metric['thread'], metric['logs'])\n\n return metric", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def _SerializeAggregateMetrics(obj):\r\n if isinstance(obj, metric.AggregatedMetric):\r\n return {'start_time': obj.start_time,\r\n 'end_time': 
obj.end_time,\r\n 'group_key': obj.group_key,\r\n 'machines': list(obj.machines),\r\n 'data': obj.counter_data\r\n }\r\n elif isinstance(obj, metric.AggregatedMetric.AggregatedCounter):\r\n logging.info('description: %r' % obj.description)\r\n return {'description': obj.description,\r\n 'is_average': obj.is_average,\r\n 'machine_data': obj.machine_data,\r\n 'cluster_total': obj.cluster_total,\r\n 'cluster_avg': obj.cluster_avg\r\n }\r\n else:\r\n raise web.HTTPError(400, \"Expected instance of AggregatedMetric or AggregatedCounter, got %r\" % obj)", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def update_metric_values(self, data):\n updated_metrics = []\n\n for metric in data:\n # Get the metric entity by name or create a new entity\n m = self.get_metric(metric)\n if m is None:\n m = self.create_metric(metric)\n\n # Add the value to the corresponding metric entity\n m.add_value(data[metric])\n\n updated_metrics.append(m)\n\n return updated_metrics", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n 
metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "async def collect_metric(self, session: aiohttp.ClientSession, metric_uuid, metric, next_fetch: datetime) -> None:\n self.last_parameters[metric_uuid] = metric\n self.next_fetch[metric_uuid] = next_fetch\n if measurement := await self.collect_sources(session, metric):\n measurement[\"metric_uuid\"] = metric_uuid\n await post(session, URL(f\"{self.server_url}/internal-api/{self.API_VERSION}/measurements\"), measurement)", "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"started_at\")).timestamp()\n except TypeError: started = 0\n\n self._prometheus_metrics[scope][\"id\"].add_metric([self._project, scope], data.get(\"id\", 0))\n self._prometheus_metrics[scope][\"duration\"].add_metric([self._project, scope], data.get(\"duration\", 0))\n self._prometheus_metrics[scope][\"created_timestamp\"].add_metric([self._project, scope], created)\n self._prometheus_metrics[scope][\"finished_timestamp\"].add_metric([self._project, scope], finished)\n self._prometheus_metrics[scope][\"started_timestamp\"].add_metric([self._project, scope], started)", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from a time metric object.
def merge_time_metric(self, metric):
    self.merge_raw_time_metric(metric.duration, metric.exclusive)
[ "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"started_at\")).timestamp()\n except TypeError: started = 0\n\n self._prometheus_metrics[scope][\"id\"].add_metric([self._project, scope], data.get(\"id\", 0))\n self._prometheus_metrics[scope][\"duration\"].add_metric([self._project, scope], data.get(\"duration\", 0))\n self._prometheus_metrics[scope][\"created_timestamp\"].add_metric([self._project, scope], created)\n self._prometheus_metrics[scope][\"finished_timestamp\"].add_metric([self._project, scope], finished)\n self._prometheus_metrics[scope][\"started_timestamp\"].add_metric([self._project, scope], started)", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. 
Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', 
attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)", "def mergetime(small, big):\n big['slots'].extend(small['slots'])\n big['times'].append(small)\n return big", "def combine_datetime(record):\n record['time'] = \"%s %s\" % (record['date'], record['time'])\n del record['date']\n return record", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def combine(self, date, time): # real signature unknown; restored from __doc__\n pass", "def combine(cls, date, time, tzinfo=True):\n if not isinstance(date, _date_class):\n raise TypeError(\"date argument must be a date instance\")\n if not isinstance(time, _time_class):\n raise TypeError(\"time argument must be a time instance\")\n if tzinfo is True:\n tzinfo = time.tzinfo\n return cls(\n date.year,\n date.month,\n date.day,\n time.hour,\n time.minute,\n time.second,\n time.microsecond,\n tzinfo,\n fold=time.fold,\n )", "def CopyToStatTimeTuple(self):", "def combine(date,time):\n return AstroTime(datetime=datetime.combine(date,time))", "def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)", "def aggregate_json(self, data):\n return TimestampedMetricValue(max(self.timestamp, data['t']),\n self.value + data['v'])", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def build_time_metric(df_joined,dictionary):\n # Extract the hour\n df_joined[\"Time\"] = df_joined[\"TimeSt\"].str[11:13]\n # Map the score\n df_joined[\"Time_Metric\"] = df_joined[\"Time\"].map(dictionary)\n return df_joined", "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built 
this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(metric_name)\n self._data[\"value\"].append(value)", "def populate_metric_values(self):\n self.new_counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]] = get_counter_metrics(\n self.counter_metric_specs, \n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n \n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_counter_metrics(self.new_counter_metrics[detailed_version.id])\n\n self.aggregated_counter_metrics = self.get_aggregated_counter_metrics()\n\n self.new_ratio_metrics: Dict[iter8id, Dict[iter8id, RatioDataPoint]] = get_ratio_metrics(\n self.ratio_metric_specs, \n self.counter_metric_specs, \n self.aggregated_counter_metrics,\n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n\n # This is in the shape of a Dict[str, RatioMaxMin], where the keys are ratio metric ids\n # and values are their max mins. \n\n self.ratio_max_mins = self.get_ratio_max_mins()\n\n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_ratio_metrics(\n self.new_ratio_metrics[detailed_version.id]\n )", "def interpolateData(self, timestep):\n assert timestep % self.header.analysisPeriod.timestep == 0, \\\n 'Target timestep({}) must be divisable by current timestep({})' \\\n .format(timestep, self.header.analysisPeriod.timestep)\n\n _minutesStep = int(60 / int(timestep / self.header.analysisPeriod.timestep))\n _dataLength = len(self.values)\n # generate new data\n _data = tuple(\n self[d].__class__(_v, self[d].datetime.addminutes(step * _minutesStep))\n for d in xrange(_dataLength)\n for _v, step in zip(self.xxrange(self[d],\n self[(d + 1) % _dataLength],\n timestep),\n xrange(timestep))\n )\n # generate data for last hour\n return _data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single value metric, merging the data with any data from prior value metrics with the same name.
def record_custom_metric(self, name, value):
    if isinstance(value, dict):
        if len(value) == 1 and 'count' in value:
            new_stats = CountStats(call_count=value['count'])
        else:
            new_stats = TimeStats(*c2t(**value))
    else:
        new_stats = TimeStats(1, value, value, value, value, value**2)

    stats = self.__stats_table.get(name)
    if stats is None:
        self.__stats_table[name] = new_stats
    else:
        stats.merge_stats(new_stats)
[ "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(metric_name)\n self._data[\"value\"].append(value)", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def save(self, metric_name: str, metric_value) -> None:\n self.__metrics__.__setitem__(metric_name, metric_value)\n return", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def log_metric(self, metric: str, value: float) -> None:\n self.metadata[metric] = value", "def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):\n if not isinstance(value, numbers.Number):\n raise MetricValueError(f\"{value} is not a valid number\")\n\n unit = self.__extract_metric_unit_value(unit=unit)\n metric = {\"Unit\": unit, \"Value\": float(value)}\n logger.debug(f\"Adding metric: {name} with {metric}\")\n self.metric_set[name] = metric\n\n if len(self.metric_set) == MAX_METRICS:\n logger.debug(f\"Exceeded maximum of {MAX_METRICS} metrics - Publishing existing metric set\")\n metrics = self.serialize_metric_set()\n print(json.dumps(metrics))\n\n # clear metric set only as opposed to metrics and dimensions set\n # since we could have more than 100 metrics\n self.metric_set.clear()", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def metrics(self, value):\n self._metrics = value", "def save(metric_name: str, metric_value) -> None:\n if type(metric_value) in [list, dict, set]:\n raise ValueError(\"metric_value must not be a container object\")\n\n global GLOBAL_METRICS_OBJECT\n\n GLOBAL_METRICS_OBJECT.save(metric_name, metric_value)\n return # end of metrics.save", "def custom(self, name, metric, value, tags=None, id_=None):\n self._report(name, metric, value, tags, id_)", "def add_resolved_metric(self, batch_id: str, value: MetricValue) -> None:\n if self.metric_values_by_batch_id is None:\n self.metric_values_by_batch_id = {}\n\n self.metric_values_by_batch_id[batch_id] = value", "def update(self, name: str, value: Union[Number, np.number]) -> None:\n self._metrics[name].update(name, value)", "def set_metric(self, key, value):\n self.h5[METRICS_GROUP_NAME].attrs[key] = cPickle.dumps(value)", "def set(self, name, value):\n self._metrics[name] = value", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def log_metric(self, name, val):\n raise NotImplementedError", "def add_value(self, name, value):\n if name not in self.data:\n self.data[name] = Statistic(name, value)\n else:\n self.data[name].add_value(value)", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", 
"def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an iterator over the set of value metrics. The items returned are a tuple consisting of the metric name and accumulated stats for the metric.
def metrics(self):
    return six.iteritems(self.__stats_table)
[ "def values(self):\r\n return MetricaValues(self)", "def values(self) -> tuple:\n return tuple(self.__metrics__.values())", "def values(self):\n if not self.is_value:\n raise GrizzlyError(\"GrizzlySeries is not evaluated and does not have values. Try calling 'evaluate()' first.\")\n return self.values_", "def values(self):\r\n return AveragedMetricaValues(self)", "def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)", "def metrics(self):\n return list(self._cache._nfvi_metrics.values())", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values", "def get_metrics_list():\n return list(metrics_dict.keys())", "def collect(self) -> None:\n if self._callback is not None:\n self._callback()\n\n while self._metrics_to_export:\n for export_record in self._metrics_to_export.popleft():\n prometheus_metric = self._translate_to_prometheus(\n export_record\n )\n if prometheus_metric is not None:\n yield prometheus_metric", "def __iter__(self):\n for value in self.values:\n yield value", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def getMetrics(self):\n return self.__metricsMgr.getMetrics()", "def itervalues(cls):\n return map(operator.attrgetter(\"value\"), cls.iterconstants())", "def itervalues(self):\r\n return iter(self.values())", "def iter_values(self):\n if self.contributes:\n for value in self.values:\n if isinstance(value, GroupingComponent):\n for x in value.iter_values():\n yield x\n else:\n yield value", "def metrics(self, value):\n self._metrics = value", "def get_metrics(): # noqa: E501\n return 'do some magic!'", "def get_vals(self) -> Iterable[float]:\n return list(self._trend_vals.values())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from a slow sql node object.
def merge_slow_sql_node(self, node):
    duration = node.duration

    self[1] += duration
    self[2] = self[0] and min(self[2], duration) or duration
    self[3] = max(self[3], duration)

    if self[3] == duration:
        self[4] = node

    # Must update the call count last as update of the
    # minimum call time is dependent on initial value.

    self[0] += 1
[ "def concatenate_data():", "def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(**kwargs)\r\n # self.stream.append(merge)\r\n # self.stream.connect()\r", "def getdata(engine ,sql): \r\n starttime = datetime.datetime.now()\r\n conn = engine.connect()\r\n try:\r\n getrst = pd.read_sql(sql,conn)\r\n except Exception as e:\r\n print('getdata error: {} \\n engine and sql as below: \\n {} \\n {}'.format(repr(e),engine,sql))\r\n finally:\r\n conn.close()\r\n endtime = datetime.datetime.now()\r\n logger.info('......it took {} seconds to get rawdata.'.format((endtime - starttime).seconds))\r\n return getrst", "def merge(self, other_btree):\n pass", "def get_both_prox_data(verbose=False):\n _pickle_file = \".prox_data.pkl\"\n\n if os.path.exists(_pickle_file):\n with open(_pickle_file, \"rb\") as f:\n _data = pickle.load(f)\n else:\n _mobile_data = get_mobile_prox_data()\n _fixed_data = change_zone_to_coordinates(get_fixed_prox_data(), \"prox\")\n\n _fixed_data.drop(columns=[\"zone\"], inplace=True)\n _data = pandas.concat([_mobile_data, _fixed_data], ignore_index=True, verify_integrity=True)\n _data.sort_values(by=[\"timestamp\"], inplace=True)\n\n with open(_pickle_file, \"wb\") as f:\n pickle.dump(_data, f)\n\n if verbose:\n print(_data)\n\n _sql_file = sql_directory + \"prox_data.sql\"\n with open(_sql_file, \"w\") as f:\n for index, row in _data.iterrows():\n f.write(\"INSERT INTO `ProxData` (`IndexColumn`, `Timestamp`, `prox-id`, `floor`, `x`, `y`) VALUES (NULL, '\" + str(row[\"timestamp\"]) + \"', '\" + row[\"prox-id\"] + \"', '%d', '%d', '%d');\" % (row[\"floor\"], row[\"x\"], row[\"y\"]))\n\n return _data", "def datastream(self, kwargs):\n head = kwargs.get('head', dict(timestamp=time.time()))\n if not head.has_key('mongo_query'):\n head['mongo_query'] = head['dasquery'].mongo_query \\\n if head.has_key('dasquery') else {}\n if head.has_key('dasquery'):\n del head['dasquery']\n if head.has_key('args'):\n del head['args']\n data = kwargs.get('data', [])\n return head, data", "def archive_ost_data(self, lmtdb):\n\n dataset_names = [\n 'datatargets/readbytes',\n 'datatargets/writebytes',\n 'fullness/bytes',\n 'fullness/bytestotal',\n 'fullness/inodes',\n 'fullness/inodestotal'\n ]\n\n self.init_datasets(dataset_names, lmtdb.ost_names)\n\n # Now query the OST_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_ost_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OST_ID', 'READ_BYTES',\n 'WRITE_BYTES', 'KBYTES_USED', 'KBYTES_FREE',\n 'INODES_USED', 'INODES_FREE']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.ost_id_map[row[col_map['OST_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n 
timestamp,\n target_name,\n row[col_map[target_dbcol]])\n elif dataset_name == 'fullness/bytestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['KBYTES_USED']] + row[col_map['KBYTES_FREE']])\n elif dataset_name == 'fullness/inodestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['INODES_USED']] + row[col_map['INODES_FREE']])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def _load_elastic(self, sqldata):\n from collections import defaultdict\n attributes = ResourceMetricsLoader.attr_fields.keys()\n records = defaultdict(lambda: defaultdict(int))\n for sd in sqldata:\n r = dict(sd.items())\n if r['ATTRIBUTE_NAME'] not in attributes:\n continue\n # Only store hostnames and not FQDN for resources\n r['RESOURCE_NAME'] = r['RESOURCE_NAME'].split('.')[0]\n\n (attr, val) = self._get_attr_val(r)\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')][attr] = val\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')]['INSERT_SEQ'] = r['INSERT_SEQ']\n\n # Construct docs from records\n inserts = [] \n for k, v in records.iteritems():\n body = { attr: val for attr, val in v.iteritems() } \n body['RESOURCE_NAME'], body['TIME_STAMP'] = k\n document = {\n \"_index\" : self._get_index_name(body['TIME_STAMP']),\n \"_type\" : 'default',\n \"_source\" : body\n }\n inserts.append(document)\n \n # Insert list of documents into elasticsearch\n self.logger.info(\"Loading chunk into elasticsearch\")\n status = helpers.bulk(self.es,\n inserts,\n self.chunk_size)\n self.logger.info(\"Finished loading chunk into elasticsearch\")\n\n # update sequence to last item in the results\n #self.seq = dict(results[-1].items())[self.id_field]\n self.seq = sqldata[-1][self.seq_field]\n \n return status", "def _load_elastic(self, sqldata):\n inserts = []\n for r in sqldata:\n body = self._preprocess(dict(r.items()))\n if not body:\n continue # Skip if preprocessing returns False\n index_name = self._get_index_name(body['TIME_STAMP'])\n document = {\n \"_index\" : index_name,\n \"_type\" : 'default', # Hardcoded - we only have 1 doctype\n \"_id\" : body[self.seq_field],\n \"_source\" : body\n }\n inserts.append(document)\n\n # update sequence to last item in the results\n self.seq = sqldata[-1][self.seq_field]\n \n # Insert list of documents into elasticsearch\n status = helpers.bulk(self.es, inserts, self.chunk_size)\n self.logger.info(\"Inserted %d chunks into %s\" % (self.chunk_size,\n index_name))\n return status", "def launch_query_with_intermediate_table(self, query, result):", "def merge_data(self, parser):\r\n\t\tself.categories = merge_lists(self.categories, parser.categories)\r\n\t\tself.category_mapping = object_merge(self.category_mapping, parser.category_mapping)\r\n\t\tself.elements = merge_lists(self.elements, parser.elements)\r\n\t\tself.device_list = merge_lists(self.device_list, parser.device_list)", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def merge(self, datalist):\n pass", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + 
t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def merge(data1, data2, ContainerType=pg.DataContainer, snap=0.001):\n data = ContainerType(data1)\n data.add(data2, snap)\n return data", "def archive_oss_data(self, lmtdb):\n\n dataset_names = [\n 'dataservers/cpuload',\n 'dataservers/memused',\n ]\n\n self.init_datasets(dataset_names, lmtdb.oss_names)\n\n # Now query the OSS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_oss_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OSS_ID', 'PCT_CPU', 'PCT_MEMORY']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.oss_id_map[row[col_map['OSS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def copy_data(self, **kwargs):\r\n snow_sql_query = kwargs['snow_sql_query']\r\n data = [self.cursor.execute(sql) for sql in snow_sql_query]\r\n return data", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def fromdb(self, select_stmt, args=None, latency=True):\n temp = petl.fromdb(self.connection, select_stmt, args)\n return temp if latency else petl.wrap([row for row in temp])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics.
def metrics_count(self):
    return len(self.__stats_table)
[ "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def count_metrics(self):\n\n return set(self._counts.keys())", "def metric_data_count(self):\n\n if not self.__settings:\n return 0\n\n return len(self.__stats_table)", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def get_ap_count(self):\n return len(self.aps_infos)", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def get_count():\n _check_init()\n return _pypm.CountDevices()", "def number_of_analytics(self):\n return TargetPrice.objects.filter(\n ticker__name=self.name\n ).distinct('analytic').count()", "def getUnseenCount():", "def valuecounts_(self): \n unique, counts = np.unique(self, return_counts=True)\n return np.asarray((unique, counts)).T", "def get_number_of_metrics(self):\n # Get metrics\n metrics = self.get_metrics()\n\n # Calculate number of metrics\n n_metrics = len(metrics)\n\n return n_metrics", "def _get_checkpoint_metrics_counts() -> (int, int):\n def get_count(method):\n proto_bytes = method(api_label=async_checkpoint._ASYNC_CHECKPOINT_V1)\n histogram_proto = summary_pb2.HistogramProto()\n histogram_proto.ParseFromString(proto_bytes)\n return int(histogram_proto.num)\n return get_count(metrics.GetCheckpointWriteDurations), get_count(\n metrics.GetAsyncCheckpointWriteDurations)", "def get_num_calls(self):\n\t\treturn self.aa_count + self.ab_count + self.bb_count", "def metrics(self) -> global___Response.Metrics:", "def __get_metrics(responses: Responses) -> Dict[str, int]:\n measures = responses[0].json()[\"component\"][\"measures\"]\n return dict((measure[\"metric\"], int(measure[\"value\"])) for measure in measures)", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def test_get_time_summary_stats_counter():\n # This is constructed to test the parsing logic for timestamps, so the number don't\n # add up.\n runtime_profile = \"- ExampleTimeStats: (Avg: 161.554ms ; \" \\\n \"Min: 101.411us ; \" \\\n \"Max: 1h2m3s4ms5us6ns ; \" \\\n \"Number of samples: 6)\"\n summary_stats = get_time_summary_stats_counter(\"ExampleTimeStats\", runtime_profile)\n assert len(summary_stats) == 1\n assert summary_stats[0].sum == 969324000\n assert summary_stats[0].min_value == 101411\n assert summary_stats[0].max_value == 3723004005006\n assert summary_stats[0].total_num_values == 6" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single apdex metric, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metric(self, metric):
    if not self.__settings:
        return

    # Note that because we are using a scope here of an empty string
    # we can potentially clash with an unscoped metric. Using None,
    # although it may help to keep them separate in the agent will
    # not make a difference to the data collector which treats None
    # as an empty string anyway.

    key = (metric.name, '')
    stats = self.__stats_table.get(key)
    if stats is None:
        stats = ApdexStats(apdex_t=metric.apdex_t)
        self.__stats_table[key] = stats
    stats.merge_apdex_metric(metric)

    return key
[ "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)", "def add_metric(self, name: str, metric: tm.Metric):\n # TODO add warnings if override happens\n self.__metrics[name] = metric", "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(metric_name)\n self._data[\"value\"].append(value)", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def add(self, metric):\r\n self.metrics.append(create(metric))", "def add_metric(self, metric: Metric):\n self.metric_data.append(metric)", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def save(self, metric_name: str, metric_value) -> None:\n self.__metrics__.__setitem__(metric_name, metric_value)\n return", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):\n if not isinstance(value, numbers.Number):\n raise MetricValueError(f\"{value} is not a valid number\")\n\n unit = self.__extract_metric_unit_value(unit=unit)\n metric = {\"Unit\": unit, \"Value\": float(value)}\n logger.debug(f\"Adding metric: {name} with {metric}\")\n self.metric_set[name] = metric\n\n if len(self.metric_set) == MAX_METRICS:\n logger.debug(f\"Exceeded maximum of {MAX_METRICS} metrics - Publishing existing metric set\")\n metrics = self.serialize_metric_set()\n print(json.dumps(metrics))\n\n # clear metric set only as opposed to metrics and dimensions set\n # since we could have more than 100 metrics\n 
self.metric_set.clear()", "def log_metric(self, metric: str, value: float) -> None:\n self.metadata[metric] = value", "def save(self):\n _, custom_metric = self._requestor.put('/metrics/custom/' + self._id, json={'name': self.name, 'context': self.context})\n self._data = custom_metric\n self.name = custom_metric[\"name\"]\n self.context = custom_metric[\"context\"]\n self._type = custom_metric[\"type\"]", "def add_avg(self, metric, name=None):\n\n newmetric = f\"{metric}-avg\" if name is None else name\n if newmetric in self:\n raise ValueError(f\"Cannot overwrite existing metric: {newmetric}\")\n\n self.count(newmetric, self.get_avg(metric))\n\n return self", "async def collect_metric(self, session: aiohttp.ClientSession, metric_uuid, metric, next_fetch: datetime) -> None:\n self.last_parameters[metric_uuid] = metric\n self.next_fetch[metric_uuid] = next_fetch\n if measurement := await self.collect_sources(session, metric):\n measurement[\"metric_uuid\"] = metric_uuid\n await post(session, URL(f\"{self.server_url}/internal-api/{self.API_VERSION}/measurements\"), measurement)", "def add_primary_device_metrics(self, device):\n device_message = self._metrics.configuration_data.primary_device\n message_fields = device_message.DESCRIPTOR.fields_by_name.keys()\n try:\n device_metrics_dict = device.get_device_info()\n except AttributeError:\n logging.info(\n 'Must implement get_device_info method for this controller in order to upload device metrics.'\n )\n return\n\n for metric in device_metrics_dict:\n if metric in message_fields:\n setattr(device_message, metric, device_metrics_dict[metric])\n else:\n logging.info('%s is not a valid metric field.', metric)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record the apdex metrics supplied by the iterable for a single transaction, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metrics(self, metrics):
    if not self.__settings:
        return

    for metric in metrics:
        self.record_apdex_metric(metric)
[ "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def track(self, impressions, inc=1):\n keys = [Counter.CounterKey(i.feature_name, truncate_time(i.time)) for i in impressions]\n with self._lock:\n for key in keys:\n self._data[key] += inc", "def send_metrics(self):\n self.last_run = time.time()\n self.collect_stats()\n for metric in self.collapse_stats():\n self.send_to_speakeasy(metric)\n self.metrics = {}", "def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def agg_dicts(self, acc, ruptures_by_grp_id):\n if hasattr(ruptures_by_grp_id, 'calc_times'):\n for srcid, nsites, eids, dt in ruptures_by_grp_id.calc_times:\n info = self.csm.infos[srcid]\n info.num_sites += nsites\n info.calc_time += dt\n info.num_split += 1\n info.events += len(eids)\n if hasattr(ruptures_by_grp_id, 'eff_ruptures'):\n acc.eff_ruptures += ruptures_by_grp_id.eff_ruptures\n acc += ruptures_by_grp_id\n self.save_ruptures(ruptures_by_grp_id)\n return acc", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)", "def report_metrics():\n\n global data_points\n\n # Set aside the current batch of metrics and initialize the list\n # used to collect metrics to empty again.\n\n with wrapt.synchronized(lock):\n pending_data_points = data_points\n data_points = []\n\n # Report the complete batch of metrics to InfluxDB in one go.\n\n if pending_data_points:\n try:\n client.write_points(pending_data_points, batch_size=10000, protocol='line')\n except 
Exception:\n traceback.print_exc()", "def increment_metrics(self, results):\n self.timesteps_total += sum([result['timesteps_total'] for result in results])\n self.episodes_total += len(results)\n self.generation += 1", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):\n bsize = len(batch_pred_map_cls)\n assert bsize == len(batch_gt_map_cls)\n for i in range(bsize):\n self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]\n self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]\n self.scan_cnt += 1", "def log_batch(self, run_id, metrics, params, tags):", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def bulk_add(self, memento):\n if memento:\n self.logger.debug(\"Restoring in-memory representation of metrics store.\")\n for doc in pickle.loads(zlib.decompress(memento)):\n self._add(doc)", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def _update_metrics(self, updates):\n if not updates:\n return\n\n metrics = self.read_metrics()\n field_order = []\n fields = set()\n if metrics:\n field_order = list(next(iter(metrics.values())).keys())\n fields.update(field_order)\n for row in updates.values():\n for key in row.keys():\n if key not in fields:\n field_order.append(key)\n fields.add(key)\n metrics[row['parcel_id']] = row\n rows = sorted(metrics.values(), key=itemgetter('parcel_id'))\n\n with open(self.metrics_path, 'w') as file:\n writer = csv.DictWriter(file, fieldnames=field_order)\n writer.writeheader()\n writer.writerows(rows)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single time metric, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metric(self, metric):
    if not self.__settings:
        return

    # Scope is forced to be empty string if None as
    # scope of None is reserved for apdex metrics.

    key = (metric.name, metric.scope or '')
    stats = self.__stats_table.get(key)
    if stats is None:
        stats = TimeStats(call_count=1,
                total_call_time=metric.duration,
                total_exclusive_call_time=metric.exclusive,
                min_call_time=metric.duration,
                max_call_time=metric.duration,
                sum_of_squares=metric.duration ** 2)
        self.__stats_table[key] = stats
    else:
        stats.merge_time_metric(metric)

    return key
[ "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"started_at\")).timestamp()\n except TypeError: started = 0\n\n self._prometheus_metrics[scope][\"id\"].add_metric([self._project, scope], data.get(\"id\", 0))\n self._prometheus_metrics[scope][\"duration\"].add_metric([self._project, scope], data.get(\"duration\", 0))\n self._prometheus_metrics[scope][\"created_timestamp\"].add_metric([self._project, scope], created)\n self._prometheus_metrics[scope][\"finished_timestamp\"].add_metric([self._project, scope], finished)\n self._prometheus_metrics[scope][\"started_timestamp\"].add_metric([self._project, scope], started)", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(metric_name)\n self._data[\"value\"].append(value)", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def addSaved(self):\n if self.savedTimespan:\n self.addToStats(self.savedTimespan[1], self.savedTimespan[0])\n print \"Added %s to stats for %s\" % self.savedTimespan\n self.savedTimespan = None", "def timing(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Timing, sample_rate=sample_rate, host=host\n )", "def recordTweet(self, tweetName: str, time: int) -> None:\r\n self.tweets[tweetName].add(time)", "def add_qual(qual, time):\n expand(time, 'quals', qual)\n return time", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n 
duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def register_record(filepath, timestamp, experiment_name, metrics):\n metrics = np.array(metrics)\n best_inds = np.argmax(metrics, axis=0)\n row_values = [timestamp, experiment_name,\n metrics[best_inds[2], 2], metrics[best_inds[2], 0], metrics[best_inds[2], 5], metrics[-1, 2], metrics[-1, 5], metrics[-1, 0],\n metrics[best_inds[1], 1], metrics[-1, 1], metrics[best_inds[3], 3], metrics[-1, 3],\n metrics[best_inds[4], 4], metrics[-1, 4]]\n\n if not os.path.exists(filepath): # Create a records file for the first time\n logger.warning(\"Records file '{}' does not exist! Creating new file ...\")\n directory = os.path.dirname(filepath)\n if not os.path.exists(directory):\n os.makedirs(directory)\n header = [\"Timestamp\", \"Name\", \"BEST DICE\", \"Epoch at BEST\", \"PrunedR at BEST\", \"Final DICE\", \"Final Pruned Ratio\", \"Final Epoch\",\n \"Best Accuracy\", \"Final Accuracy\", \"Best Precision\", \"Final Precision\", \"Best Recall\", \"Final Recall\"]\n book = xlwt.Workbook() # excel work book\n book = write_table_to_sheet([header, row_values], book, sheet_name=\"records\")\n book.save(filepath)\n else:\n try:\n export_record(filepath, row_values)\n except Exception as x:\n alt_path = os.path.join(os.path.dirname(filepath), \"record_\" + experiment_name)\n logger.error(\"Failed saving in: '{}'! 
Will save here instead: {}\".format(filepath, alt_path))\n export_record(alt_path, row_values)\n\n logger.info(\"Exported performance record to '{}'\".format(filepath))", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def save(self):\n _, custom_metric = self._requestor.put('/metrics/custom/' + self._id, json={'name': self.name, 'context': self.context})\n self._data = custom_metric\n self.name = custom_metric[\"name\"]\n self.context = custom_metric[\"context\"]\n self._type = custom_metric[\"type\"]", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def add(self, metric):\r\n self.metrics.append(create(metric))", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def add_metric(self, name: str, metric: tm.Metric):\n # TODO add warnings if override happens\n self.__metrics[name] = metric" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record the time metrics supplied by the iterable for a single transaction, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metrics(self, metrics): if not self.__settings: return for metric in metrics: self.record_time_metric(metric)
[ "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"started_at\")).timestamp()\n except TypeError: started = 0\n\n self._prometheus_metrics[scope][\"id\"].add_metric([self._project, scope], data.get(\"id\", 0))\n self._prometheus_metrics[scope][\"duration\"].add_metric([self._project, scope], data.get(\"duration\", 0))\n self._prometheus_metrics[scope][\"created_timestamp\"].add_metric([self._project, scope], created)\n self._prometheus_metrics[scope][\"finished_timestamp\"].add_metric([self._project, scope], finished)\n self._prometheus_metrics[scope][\"started_timestamp\"].add_metric([self._project, scope], started)", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def record_time_metric(self, 
metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n 
# appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += 
self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def send_metrics(self):\n self.last_run = time.time()\n self.collect_stats()\n for metric in self.collapse_stats():\n self.send_to_speakeasy(metric)\n self.metrics = {}", "def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def record_custom_metric(self, name, value):\n if 
isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def agg_dicts(self, acc, ruptures_by_grp_id):\n if hasattr(ruptures_by_grp_id, 'calc_times'):\n for srcid, nsites, eids, dt in ruptures_by_grp_id.calc_times:\n info = self.csm.infos[srcid]\n info.num_sites += nsites\n info.calc_time += dt\n info.num_split += 1\n info.events += len(eids)\n if hasattr(ruptures_by_grp_id, 'eff_ruptures'):\n acc.eff_ruptures += ruptures_by_grp_id.eff_ruptures\n acc += ruptures_by_grp_id\n self.save_ruptures(ruptures_by_grp_id)\n return acc", "def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def report_metrics():\n\n global data_points\n\n # Set aside the current batch of metrics and initialize the list\n # used to collect metrics to empty again.\n\n with wrapt.synchronized(lock):\n pending_data_points = data_points\n data_points = []\n\n # Report the complete batch of metrics to InfluxDB in one go.\n\n if pending_data_points:\n try:\n client.write_points(pending_data_points, batch_size=10000, protocol='line')\n except Exception:\n traceback.print_exc()", "def addSaved(self):\n if self.savedTimespan:\n self.addToStats(self.savedTimespan[1], self.savedTimespan[0])\n print \"Added %s to stats for %s\" % self.savedTimespan\n self.savedTimespan = None", "def _flush():\n log.debug(\"Flushing metrics\")\n now = util.ts()\n if hasattr(metrics, 'old_timestamp'):\n lag = now - metrics.old_timestamp - (settings.flush_interval / 1000)\n with metrics.lock:\n metrics.gauges[settings.timestamp_lag_namespace] = lag\n # Only flush writes to 'old_timestamp' so we're not going to lock\n metrics.old_timestamp = now\n\n with metrics.lock:\n data = {\n 'counters': metrics.counters.copy(),\n 'gauges': metrics.gauges.copy(),\n 'timers': metrics.timers.copy(),\n 'timer_counters': metrics.timer_counters.copy(),\n 'sets': metrics.sets.copy(),\n 'counter_rates': metrics.counter_rates.copy(),\n 'timer_data': metrics.timer_data.copy(),\n 'percent_threshold': settings.percent_threshold,\n 'histogram': settings.histogram,\n }\n\n _clear_metrics()\n\n statistics.process_metrics(data, settings.flush_interval, _processed)", "def _get_timing_entries(self):\n \n open_events = {} # t_sim -> TimingEntry\n duplicates = {} # t_sim -> int (Number of concurrent instantiations)\n \n for csv_row in self._csv_reader:\n (t_sim, t_real, action) = self._extract_timing_record_row(csv_row)\n \n if action == '0' or action == '1': # external event or prediction\n pre 
= action == '1'\n self._add_new_event(open_events, duplicates, t_sim, t_real, pre)\n \n elif action == '3' and t_sim in open_events: # Begin distribution\n open_events[t_sim].set_begin_distribution_time(t_real)\n \n elif action == '2' or action == '4': # End distribution or outdated\n if action == '2' and t_sim in open_events:\n open_events[t_sim].set_end_distribution_time(t_real)\n \n entry = self._finalize_event(open_events, duplicates, t_sim)\n \n if entry is not None:\n yield entry" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if transaction is the slowest transaction and update accordingly.
def _update_slow_transaction(self, transaction): slowest = 0 name = transaction.path if self.__slow_transaction: slowest = self.__slow_transaction.duration if name in self.__slow_transaction_map: slowest = max(self.__slow_transaction_map[name], slowest) if transaction.duration > slowest: # We are going to replace the prior slow transaction. # We need to be a bit tricky here. If we are overriding # an existing slow transaction for a different name, # then we need to restore in the transaction map what # the previous slowest duration was for that, or remove # it if there wasn't one. This is so we do not incorrectly # suppress it given that it was never actually reported # as the slowest transaction. if self.__slow_transaction: if self.__slow_transaction.path != name: if self.__slow_transaction_old_duration: self.__slow_transaction_map[ self.__slow_transaction.path] = ( self.__slow_transaction_old_duration) else: del self.__slow_transaction_map[ self.__slow_transaction.path] if name in self.__slow_transaction_map: self.__slow_transaction_old_duration = ( self.__slow_transaction_map[name]) else: self.__slow_transaction_old_duration = None self.__slow_transaction = transaction self.__slow_transaction_map[name] = transaction.duration
[ "def LastTransaction(self) -> bool:", "def merge_slow_sql_node(self, node):\n\n duration = node.duration\n\n self[1] += duration\n self[2] = self[0] and min(self[2], duration) or duration\n self[3] = max(self[3], duration)\n\n if self[3] == duration:\n self[4] = node\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += 1", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def test_commit_optimize(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n\n # Make sure the changes weren't commited.\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without commiting, \"\n \"results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.commit(_optimize=True)\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))", "def test_commit_optimize(self):\n # Same id, data and user_id\n id = get_rand_string()\n self.add(id=id, user_id=id, data=id)\n\n # Make sure the changes weren't commited.\n results = self.query(self.conn, \"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without commiting, \"\n \"results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.commit(_optimize=True)\n\n results = self.query(self.conn, \"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))", "def test_update_transaction_dispute(self):\n pass", "def update_to_t(self,time):\n if not isinstance(time, pd.Timestamp):\n raise TypeError(\"time supplied to aupdate_to_t should be \\\n a pandas timestamp\")\n if time>self.last_time_checked:\n tmp_orders = self.order_manager.get_open_orders_info()\n for id, o in tmp_orders.items():\n if time>=o['time_executed']:\n self.order_manager.execute_order(id,self.asset_manager)\n self.asset_manager.update_to_time(time)\n self.total_trades += 1\n\n self.last_time_checked = time\n self.asset_manager.add_to_history(self.broker,time)", "def InTransaction(self) -> bool:", "def test_partial_update_transaction(self):\n pass", "def for_update_sql(self, nowait=False):\n if nowait:\n return 'FOR UPDATE NOWAIT'\n else:\n return 'FOR UPDATE'", "def update_batchtask_back(cls, tb: TaskBatchBack) -> bool:\n return DbManager._currdb.update_batchtask_back(tb)", 
"def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if last_statistics_refresh_time is None:\n return True\n return last_event.max_timestamp - last_statistics_refresh_time > self.__statistics_update_time_window", "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def test_update_transaction(self):\n pass", "def update_highest_buy(self, limit):\n if limit.size == 0:\n #predecessor case\n limit = self.buy_tree.predecessor(limit)\n if limit is None:\n #no predecessor\n self.highest_buy = None\n else: # have a predecessor but dont know if it has order or not\n if limit.size == 0: #limit has no order but other limits in the tree might have orders\n if self.buy_tree.size == 0: #we know no other limits have an order\n self.highest_buy = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.buy_tree.predecessor(limit)\n #now our limit has a valid order\n self.highest_buy = limit.price\n else: #found valid pred\n self.highest_buy = limit.price", "def runs_slowly(self):\n return self.has_code_coverage() or self.is_asan() or self.is_tsan() or self.is_ubsan()", "def is_best(self, val) -> bool:\n if self.val is None or (val > self.val):\n self.val = val\n print(\"Updating Best\")\n return True\n else:\n return False", "def TransactionCommitted(self) -> bool:", "async def short_sync_backtrack(\n self, peer: WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32\n ) -> bool:\n try:\n if peer.peer_node_id not in self.sync_store.backtrack_syncing:\n self.sync_store.backtrack_syncing[peer.peer_node_id] = 0\n self.sync_store.backtrack_syncing[peer.peer_node_id] += 1\n\n unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)\n curr_height: int = target_height\n found_fork_point = False\n blocks = []\n while curr_height > peak_height - 5:\n # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will\n # already have the unfinished block, from when it was broadcast, so we just need to download the header,\n # but not the transactions\n fetch_tx: bool = unfinished_block is None or curr_height != target_height\n curr = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)\n )\n if curr is None:\n raise ValueError(f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out\")\n if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):\n raise ValueError(\n f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}\"\n )\n blocks.append(curr.block)\n if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:\n found_fork_point = True\n break\n curr_height -= 1\n if found_fork_point:\n for block in reversed(blocks):\n await self.add_block(block, peer)\n except (asyncio.CancelledError, Exception):\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n raise\n\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n return found_fork_point" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if transaction is a synthetics trace and save it to __synthetics_transactions.
def _update_synthetics_transaction(self, transaction): settings = self.__settings if not transaction.synthetics_resource_id: return maximum = settings.agent_limits.synthetics_transactions if len(self.__synthetics_transactions) < maximum: self.__synthetics_transactions.append(transaction)
[ "def isSStx(tx):\n try:\n checkSStx(tx)\n\n except Exception as e:\n log.debug(\"isSStx: {}\".format(e))\n\n else:\n return True", "def save(self, trade: Trade) -> Trade:\n\n pass # pragma: no cover", "def isTx(self):\n\t\treturn self.extension == '.tx'", "def trace2Snapshot(self,trace,timefrac,smodel,G):\n maxtime = max([trace[node][state] for node in trace.keys() for state in trace[node].keys()])\n curtime = int(round(timefrac * maxtime))\n curstate = {self.INFECTED: set(), self.SUSCEPTIBLE: set([node for node in G.nodes() if not trace.has_key(node)]), self.RECOVERED: set(), self.EXPOSED: set()}\n for node in G.nodes():\n if not trace.has_key(node):\n curstate[self.SUSCEPTIBLE].add(node)\n continue\n etime, itime, rtime = maxtime*1000, maxtime*1000, maxtime*1000\n if trace[node].has_key(self.EXPOSED):\n etime = trace[node][self.EXPOSED]\n if trace[node].has_key(self.INFECTED):\n itime = trace[node][self.INFECTED]\n if trace[node].has_key(self.RECOVERED):\n rtime = trace[node][self.RECOVERED]\n if smodel in [\"si\", \"sis\",\"sir\"] and curtime < itime:\n curstate[self.SUSCEPTIBLE].add(node)\n elif smodel == \"seir\" and curtime < etime:\n curstate[self.SUSCEPTIBLE].add(node)\n elif curtime >= etime and curtime < itime:\n curstate[self.EXPOSED].add(node)\n elif curtime >= itime and curtime < rtime:\n curstate[self.INFECTED].add(node)\n elif curtime >= rtime:\n curstate[self.RECOVERED].add(node)\n return curstate", "def trackTrans(self):\n self.data_struct['_trackTrans'] = True", "def is_transaction(self) -> bool:\n return self._is_txn", "def _is_trace_on():\n return AceQLHttpApi.is_trace_on()", "def is_transaction(self):\n return self._request.has_var(\"_transid\")", "def isSTraced(self):\n try:\n return self.sTraced\n except AttributeError:\n self.sTraced = False\n return False", "def report(self, trade, is_entry):\n pass", "def save(self, *args, **kwargs):\n known_treatment(self.treat_type)\n super(Treatment, self).save(*args, **kwargs)", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. 
Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def isTraceback(self):\r\n return self._wrap(type(self.obj) is TracebackType)", "def _is_simulated_trade(entity):\n return entity.record_type == \"Trade\" and entity.status == \"Simulated\"", "def _apply_trx_trade_to_allocation(cls, allocation, block_trade):\n try:\n allocation.TrxTrade(block_trade)\n allocation.Commit()\n except Exception as e:\n error_message = 'Failed to stamp TrxTrade {0} on Allocation Trade: {1} , {2}'\n LOGGER.exception(error_message.format(block_trade.Oid(), allocation.Oid(), e))\n return False\n\n return True", "def _track_tx_spending(tx, utxoset):\n # track the outputs being spent in this tx, by removing each output being spent\n # from utxoset, and setting it on input spending it\n for txin in tx.inputs:\n if txin.is_coinbase:\n continue\n spending_info = utxoset.spend(txin._spent_txid, txin.spent_output_idx)\n txin.spending_info = spending_info\n # add outputs of new tx to utxoset\n utxoset.add_from_tx(tx)", "def sign_transaction_essence(self, prepared_transaction_data):\n return self._call_account_method(\n 'signTransactionEssence', {\n 'preparedTransactionData': prepared_transaction_data\n }\n )", "async def add_trade_record(self, record: TradeRecord, in_transaction) -> None:\n if not in_transaction:\n await self.db_wrapper.lock.acquire()\n try:\n cursor = await self.db_connection.execute(\n \"INSERT OR REPLACE INTO trade_records VALUES(?, ?, ?, ?, ?, ?)\",\n (\n bytes(record),\n record.trade_id.hex(),\n record.status,\n record.confirmed_at_index,\n record.created_at_time,\n record.sent,\n ),\n )\n await cursor.close()\n finally:\n if not in_transaction:\n await self.db_connection.commit()\n self.db_wrapper.lock.release()", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in 
self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record any apdex and time metrics for the transaction as well as any errors which occurred for the transaction. If the transaction qualifies to become the slow transaction remember it for later.
def record_transaction(self, transaction): if not self.__settings: return settings = self.__settings # Record the apdex, value and time metrics generated from the # transaction. Whether time metrics are reported as distinct # metrics or into a rollup is in part controlled via settings # for minimum number of unique metrics to be reported and thence # whether over a time threshold calculated as percentage of # overall request time, up to a maximum number of unique # metrics. This is intended to limit how many metrics are # reported for each transaction and try and cut down on an # explosion of unique metric names. The limits and thresholds # are applied after the metrics are reverse sorted based on # exclusive times for each metric. This ensures that the metrics # with greatest exclusive time are retained over those with # lesser time. Such metrics get reported into the performance # breakdown tab for specific web transactions. self.record_apdex_metrics(transaction.apdex_metrics(self)) self.merge_custom_metrics(transaction.custom_metrics.metrics()) self.record_time_metrics(transaction.time_metrics(self)) # Capture any errors if error collection is enabled. # Only retain maximum number allowed per harvest. error_collector = settings.error_collector if (error_collector.enabled and settings.collect_errors and len(self.__transaction_errors) < settings.agent_limits.errors_per_harvest): self.__transaction_errors.extend(transaction.error_details()) self.__transaction_errors = self.__transaction_errors[: settings.agent_limits.errors_per_harvest] if (error_collector.capture_events and error_collector.enabled and settings.collect_error_events): events = transaction.error_events(self.__stats_table) for event in events: self._error_events.add(event, priority=transaction.priority) # Capture any sql traces if transaction tracer enabled. if settings.slow_sql.enabled and settings.collect_traces: for node in transaction.slow_sql_nodes(self): self.record_slow_sql_node(node) # Remember as slowest transaction if transaction tracer # is enabled, it is over the threshold and slower than # any existing transaction seen for this period and in # the historical snapshot of slow transactions, plus # recording of transaction trace for this transaction # has not been suppressed. transaction_tracer = settings.transaction_tracer if (not transaction.suppress_transaction_trace and transaction_tracer.enabled and settings.collect_traces): # Transactions saved for Synthetics transactions # do not depend on the transaction threshold. self._update_synthetics_transaction(transaction) threshold = transaction_tracer.transaction_threshold if threshold is None: threshold = transaction.apdex_t * 4 if transaction.duration >= threshold: self._update_slow_transaction(transaction) # Create the transaction event and add it to the # appropriate "bucket." Synthetic requests are saved in one, # while transactions from regular requests are saved in another. 
if transaction.synthetics_resource_id: event = transaction.transaction_event(self.__stats_table) self._synthetics_events.add(event) elif (settings.collect_analytics_events and settings.transaction_events.enabled): event = transaction.transaction_event(self.__stats_table) self._transaction_events.add(event, priority=transaction.priority) # Merge in custom events if (settings.collect_custom_events and settings.custom_insights_events.enabled): self.custom_events.merge(transaction.custom_events) # Merge in span events if (settings.distributed_tracing.enabled and settings.span_events.enabled and settings.collect_span_events): if settings.infinite_tracing.enabled: for event in transaction.span_protos(settings): self._span_stream.put(event) elif transaction.sampled: for event in transaction.span_events(self.__settings): self._span_events.add(event, priority=transaction.priority)
[ "def _update_slow_transaction(self, transaction):\n\n slowest = 0\n name = transaction.path\n\n if self.__slow_transaction:\n slowest = self.__slow_transaction.duration\n if name in self.__slow_transaction_map:\n slowest = max(self.__slow_transaction_map[name], slowest)\n\n if transaction.duration > slowest:\n # We are going to replace the prior slow transaction.\n # We need to be a bit tricky here. If we are overriding\n # an existing slow transaction for a different name,\n # then we need to restore in the transaction map what\n # the previous slowest duration was for that, or remove\n # it if there wasn't one. This is so we do not incorrectly\n # suppress it given that it was never actually reported\n # as the slowest transaction.\n\n if self.__slow_transaction:\n if self.__slow_transaction.path != name:\n if self.__slow_transaction_old_duration:\n self.__slow_transaction_map[\n self.__slow_transaction.path] = (\n self.__slow_transaction_old_duration)\n else:\n del self.__slow_transaction_map[\n self.__slow_transaction.path]\n\n if name in self.__slow_transaction_map:\n self.__slow_transaction_old_duration = (\n self.__slow_transaction_map[name])\n else:\n self.__slow_transaction_old_duration = None\n\n self.__slow_transaction = transaction\n self.__slow_transaction_map[name] = transaction.duration", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t", "def trackTrans(self):\n self.data_struct['_trackTrans'] = True", "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. 
Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def transaction_time(self, transaction_time):\n\n self._transaction_time = transaction_time", "def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? 
\n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that", "def record_time():\n g.request_start_time = datetime.utcnow()", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def record_starttime(self):\n\n self._starttime = datetime.datetime.now()\n\n # this is recorded in the report file\n self.metadata[\"result\"][\"starttime\"] = self._starttime.strftime(\"%Y/%m/%d %X\")", "async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")", "def enable_slow_log_stats():\n\n global stats\n stats = SlowAsyncIOStats()", "def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)", "def log_query(self):\n pass", "def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True", "def test_baseline():\n with log.timed(\"pass\"):\n pass", "def report(logger):\n elapsed = time.time() - started_at\n send_metric(\"Total Songs\", total_songs)\n send_metric(\"Malformed Songs\", malformed_songs)\n send_metric(\"Songs\", valid_songs)\n send_metric(\"Total Songs / second\", (total_songs / elapsed))\n send_metric(\"Rows / second\", (valid_rows / elapsed))", "def test_invalid_txn_infos(benchmark):\n # create mock object\n block_store = Mock()\n batch_tracker = BatchTracker(block_store)\n\n batch_tracker.notify_batch_pending(\n make_batch(\"good_batch\", \"good_txn\"))\n batch_tracker.notify_batch_pending(\n make_batch(\"bad_batch\", \"bad_txn\"))\n\n batch_tracker.notify_txn_invalid(\"good_txn\")\n\n invalid_info = benchmark(batch_tracker.get_invalid_txn_info, \"bad_batch\")\n assert 0 == len(invalid_info)", "async def audit(self, ctx):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a count of the number of unique metrics.
def metric_data_count(self): if not self.__settings: return 0 return len(self.__stats_table)
[ "def count_metrics(self):\n\n return set(self._counts.keys())", "def metrics_count(self):\n\n return len(self.__stats_table)", "def getUnseenCount():", "def unique_counts(self) -> Self:\n return self._from_pyexpr(self._pyexpr.unique_counts())", "def get_number_of_metrics(self):\n # Get metrics\n metrics = self.get_metrics()\n\n # Calculate number of metrics\n n_metrics = len(metrics)\n\n return n_metrics", "def count_unique_values(data: list) -> int:\n unique_lengths: set = get_unique_lengths()\n return len([\n output_value\n for line in data\n for output_value in parse_line(line)[1]\n if len(output_value) in unique_lengths\n ])", "def print_num_uniques(self):\n print 'There are', self.num_uniques, 'unique cabs in this dataset.'", "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def Count(self) -> int:", "def test_task_unique_total(self):\r\n # from bookie.bcelery import tasks\r\n tasks.count_unique()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.UNIQUE_CT)\r\n self.assertEqual(stat.data, 3)", "def identifier_count(self):\n\n return len(self.unique_identifiers)", "def count_unique(hashable_objects: pd.Series) -> int:\n return hashable_objects.nunique() #len(set(hashable_objects))", "def valuecounts_(self): \n unique, counts = np.unique(self, return_counts=True)\n return np.asarray((unique, counts)).T", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def count(self, relative=False):\n names = []\n\n for item in keyValueList(self.yml): # [(key, value)]\n if item[0] == 'name':\n names.append(str(item[1]).strip())\n\n frequencies = Counter(names).values() # counts the elements' frequency\n nuniques = sum(1 for v in frequencies if v == 1)\n\n if relative:\n if len(names) == 0:\n return 0\n return float(nuniques)/float(len(names))\n else: \n return nuniques", "def get_num_unique_items(self):\n return len(self.get_unique_item_ids())", "def __distinct_size(self):\n return len(dict.keys(self))", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def test_number_of_metrics(self):\n self.assertEqual(self.expected_number_of_metrics, len(self.metrics))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of slow transaction data collected during the reporting period.
def transaction_trace_data(self, connections): _logger.debug('Generating transaction trace data.') if not self.__settings: return [] # Create a set 'traces' that is a union of slow transaction, # and Synthetics transactions. This ensures we don't send # duplicates of a transaction. traces = set() if self.__slow_transaction: traces.add(self.__slow_transaction) traces.update(self.__synthetics_transactions) # Return an empty list if no transactions were captured. if not traces: return [] # We want to limit the number of explain plans we do across # these. So work out what were the slowest and tag them. # Later the explain plan will only be run on those which are # tagged. agent_limits = self.__settings.agent_limits explain_plan_limit = agent_limits.sql_explain_plans_per_harvest maximum_nodes = agent_limits.transaction_traces_nodes database_nodes = [] if explain_plan_limit != 0: for trace in traces: for node in trace.slow_sql: # Make sure we clear any flag for explain plans on # the nodes in case a transaction trace was merged # in from previous harvest period. node.generate_explain_plan = False # Node should be excluded if not for an operation # that we can't do an explain plan on. Also should # not be one which would not be included in the # transaction trace because limit was reached. if (node.node_count < maximum_nodes and node.connect_params and node.statement.operation in node.statement.database.explain_stmts): database_nodes.append(node) database_nodes = sorted(database_nodes, key=lambda x: x.duration)[-explain_plan_limit:] for node in database_nodes: node.generate_explain_plan = True else: for trace in traces: for node in trace.slow_sql: node.generate_explain_plan = True database_nodes.append(node) # Now generate the transaction traces. We need to cap the # number of nodes capture to the specified limit. trace_data = [] for trace in traces: transaction_trace = trace.transaction_trace( self, maximum_nodes, connections) data = [transaction_trace, list(trace.string_table.values())] if self.__settings.debug.log_transaction_trace_payload: _logger.debug('Encoding slow transaction data where ' 'payload=%r.', data) json_data = json_encode(data) level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION zlib_data = zlib.compress(six.b(json_data), level) pack_data = base64.standard_b64encode(zlib_data) if six.PY3: pack_data = pack_data.decode('Latin-1') root = transaction_trace.root if trace.record_tt: force_persist = True else: force_persist = False if trace.include_transaction_trace_request_uri: request_uri = trace.request_uri else: request_uri = None trace_data.append([transaction_trace.start_time, root.end_time - root.start_time, trace.path, request_uri, pack_data, trace.guid, None, force_persist, None, trace.synthetics_resource_id, ]) return trace_data
[ "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def get_settlement_data():\n\n recent_cutoff = datetime.now() - timedelta(hours=settings.get(\"application\",\"recent_user_horizon\"))\n\n s_info = []\n\n ids = utils.mdb.settlements.find({'last_accessed': {'$gte': recent_cutoff}}).distinct('_id')\n\n sorting_hat = {}\n for s_id in ids:\n last_updated = utils.mdb.settlement_events.find({'settlement_id': s_id}).limit(1).sort(\"created_on\",-1)[0]['created_on']\n sorting_hat[last_updated] = s_id\n\n sorted_ids = []\n for timestamp in sorted(sorting_hat.keys(), reverse=True):\n sorted_ids.append(sorting_hat[timestamp])\n\n for s_id in sorted_ids:\n S = settlements.Settlement(_id=s_id, normalize_on_init=False)\n s_dict = copy(S.serialize('dashboard'))\n s_info.append(s_dict)\n\n return \"[\" + \",\".join(s_info) + \"]\"", "def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def get_timings(self) -> List[Timing]:\n return self.timings", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def getTransactions(self):\n return []", "def get_qatestreporttable_data(self):\n try:\n \n #Fetching the reports of Benchmark Test Execution and returning it\n qa_test_history = sidecar_conn.events.list_test_history(project_id = 3)\n self.event_data = qa_test_history\n return list(qa_test_history) \n except Exception, e:\n exceptions.handle(self.request, \"Unable to fetch the reports.\")\n return []", "def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % 
dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times", "def timeseries(self) -> List[ResponseTimeseries]:\n return self._timeseries", "def get_latest_transactions(self):", "async def get_all_trades(self) -> List[TradeRecord]:\n\n cursor = await self.db_connection.execute(\"SELECT * from trade_records\")\n rows = await cursor.fetchall()\n await cursor.close()\n records = []\n\n for row in rows:\n record = TradeRecord.from_bytes(row[0])\n records.append(record)\n\n return records", "def getUSDMarketData(time_period):\n \n # Get USD market labels\n market_labels = getUSDMarketLabels()\n \n # Create dictionary to store data\n market_data = {market_label: [] for market_label in market_labels}\n \n # Get time now\n start_time = time.time()\n \n # Define total time to get data\n total_time = time_period*60*60 # seconds\n \n # Create bittrex object\n my_bittrex = Bittrex(api_key = keys.api_key, api_secret = keys.api_secret,\n api_version='v1.1')\n \n while time.time() - start_time < total_time:\n print(f'The amount of time elapsed is: {time.time() - start_time}')\n for market_label in market_labels:\n # Message to user\n print(f'Currently grabbing data for {market_label}...')\n \n # Get market history\n market_history = my_bittrex.get_market_history(market_label)['result']\n\n # Keys to remove from each transaction\n remove_keys = ['FillType', 'OrderType', 'Total']\n \n # Remove uncessary data\n clean_market_history = []\n for transaction in market_history:\n for remove_key in remove_keys:\n transaction.pop(remove_key, None)\n clean_market_history.append(transaction)\n \n # Append data\n market_data[market_label] = market_data[market_label] + clean_market_history \n \n # Wait for 5 seconds\n time.sleep(5)\n \n # Save the results \n for market_label in market_labels:\n # Get market history\n market_history = market_data[market_label]\n \n # List to store data\n temp_Id = []\n temp_dates = []\n temp_prices = []\n temp_quantity = []\n \n # Update list\n for trade in market_history:\n temp_Id.append(trade['Uuid'])\n temp_dates.append(trade['TimeStamp'])\n temp_prices.append(trade['Price'])\n temp_quantity.append(trade['Quantity'])\n \n # Get unique ids\n unique_ids = list(set(temp_Id))\n unique_ids_index = [temp_Id.index(Id) for Id in unique_ids]\n \n # Get unique transactions\n dates = [temp_dates [ii] for ii in unique_ids_index]\n prices = [temp_prices[ii] for ii in unique_ids_index]\n quantity = [temp_quantity[ii] for ii in unique_ids_index]\n \n # list to dict\n market_history = {'Date': dates,\n 'Price': 
prices,\n 'Quantity': quantity}\n \n # Dict to df\n market_history = pd.DataFrame(market_history, index = unique_ids_index).sort_values(by = 'Date')\n \n # Save market history\n data_prefix = 'data\\\\'\n file_name = f'{market_label}' + '.csv'\n market_history.to_csv(data_prefix + file_name, index = False)\n \n # NOTE SAVE THE ID SO THAT WE CAN APPEND NEW TRADES\n\n return market_labels", "def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... \")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel", "def get_time_series(self):\n pass", "def get_pending_transactions(\n endpoint = DEFAULT_ENDPOINT,\n timeout = DEFAULT_TIMEOUT\n) -> list:\n method = \"hmyv2_pendingTransactions\"\n try:\n return rpc_request( method,\n endpoint = endpoint,\n timeout = timeout )[ \"result\" ]\n except KeyError as exception:\n raise InvalidRPCReplyError( method, endpoint ) from exception", "def transactions(self, interval, verbose=True):\n\t\tinterval = interval.lower()\n\t\tassert interval in ['minute', 'hour', 'day'], 'Invalid interval specified.'\n\t\treqst = requests.get('https://www.bitstamp.net/api/v2/transactions/btcusd/?time={}'.format(interval))\n\t\ttime_stamp = datetime.strftime(datetime.now(), \"%Y-%m-%d %H:%M:%S\")\n\t\tif verbose: 
print(\"({}): Collected transaction data!\".format(time_stamp))\n\t\ttrxns = pd.DataFrame(json.loads(reqst.content))\n\t\tif 'date' in trxns.columns:\n\t\t\ttrxns['date'] = pd.to_datetime(trxns['date'].astype(int).apply(datetime.fromtimestamp))\n\t\treturn trxns", "def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)", "def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events", "def report_list(timestamp=None, limit=50):\n query = {'method': 'report_list'}\n if timestamp is None:\n ts = datetime.now()\n timestamp = time.mktime(ts.utctimetuple())\n else:\n try:\n timestamp = time.mktime(parse(timestamp).utctimetuple())\n except:\n query.update({'error': 'Unable to convert time to timestamp: ' + str(time)})\n query['timestamp'] = timestamp\n query['limit'] = limit\n return __query(query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing any slow transaction data collected during the reporting period. NOTE: Currently only the slowest transaction for the reporting period is retained.
def slow_transaction_data(self): # XXX This method no longer appears to be used. Being replaced # by the transaction_trace_data() method. if not self.__settings: return [] if not self.__slow_transaction: return [] maximum = self.__settings.agent_limits.transaction_traces_nodes transaction_trace = self.__slow_transaction.transaction_trace( self, maximum) data = [transaction_trace, list(self.__slow_transaction.string_table.values())] if self.__settings.debug.log_transaction_trace_payload: _logger.debug('Encoding slow transaction data where ' 'payload=%r.', data) json_data = json_encode(data) level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION zlib_data = zlib.compress(six.b(json_data), level) pack_data = base64.standard_b64encode(zlib_data) if six.PY3: pack_data = pack_data.decode('Latin-1') root = transaction_trace.root trace_data = [[root.start_time, root.end_time - root.start_time, self.__slow_transaction.path, self.__slow_transaction.request_uri, pack_data]] return trace_data
[ "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def get_timings(self) -> List[Timing]:\n return self.timings", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def get_pending_transactions(\n endpoint = DEFAULT_ENDPOINT,\n timeout = DEFAULT_TIMEOUT\n) -> list:\n method = \"hmyv2_pendingTransactions\"\n try:\n return rpc_request( method,\n endpoint = endpoint,\n timeout = timeout )[ \"result\" ]\n except KeyError as exception:\n raise InvalidRPCReplyError( method, endpoint ) from exception", "def get_settlement_data():\n\n recent_cutoff = datetime.now() - timedelta(hours=settings.get(\"application\",\"recent_user_horizon\"))\n\n s_info = []\n\n ids = utils.mdb.settlements.find({'last_accessed': {'$gte': recent_cutoff}}).distinct('_id')\n\n sorting_hat = {}\n for s_id in ids:\n last_updated = utils.mdb.settlement_events.find({'settlement_id': s_id}).limit(1).sort(\"created_on\",-1)[0]['created_on']\n sorting_hat[last_updated] = s_id\n\n sorted_ids = []\n for timestamp in sorted(sorting_hat.keys(), reverse=True):\n sorted_ids.append(sorting_hat[timestamp])\n\n for s_id in sorted_ids:\n S = settlements.Settlement(_id=s_id, normalize_on_init=False)\n s_dict = copy(S.serialize('dashboard'))\n s_info.append(s_dict)\n\n return \"[\" + \",\".join(s_info) + \"]\"", "def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def getTransactions(self):\n return []", "def get_pending_transactions():\n\n return History.get_pending().get()", "def getTimeTariffIntervals(self):\n return self._TimeTariffIntervals", "def GetTransactionList(self):\n pklog.info(\"GetTransactionList() was called\")\n return 
self._get_transaction_list()", "def get_latest_transactions(self):", "def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )", "def query_finished_cycles(self) -> list:\n rows = self.query_all_data()\n return [row for row in rows if row[2] != \"ongoing\"]", "def timeseries(self) -> List[ResponseTimeseries]:\n return self._timeseries", "async def get_all_trades(self) -> List[TradeRecord]:\n\n cursor = await self.db_connection.execute(\"SELECT * from trade_records\")\n rows = await cursor.fetchall()\n await cursor.close()\n records = []\n\n for row in rows:\n record = TradeRecord.from_bytes(row[0])\n records.append(record)\n\n return records", "def trades(self) -> List[ClosedTrade]:\n return store.completed_trades.trades", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... 
\")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the accumulated statistics back to the initial state for metric data.
def reset_metric_stats(self): self.__stats_table = {}
[ "def reset_statistics(self):\n self._statistics = {}", "def _reset_stats(self):\n self.confusion_matrix = None", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"rows\"] = 0\n STATS[\"correct_format\"] = 0\n STATS[\"incorrect_format\"] = 0\n STATS[\"first_val\"] = 0", "def reset(self) -> None:\n for name in self.metrics.keys():\n self._metrics[name].reset()", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def clearStats(self):\n self._stats = None", "def reset_stats():\n __STATS[\"data\"] = {}\n __STATS[\"mod\"] = None", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def clear_stats(self):\n self._stats = None", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset_metrics(self):\n super().reset_metrics()\n del self.observations[:]\n del self.labels[:]", "def reset_metrics(self):\n self.metrics['loss'] = 0.0", "def reset(self):\n self.observation = None\n self.history.clear()\n self.reset_metrics()", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def reset_running_stats(self):\n self.module.reset_running_stats()", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def _reset_metrics(self):\n self.train_loss = []\n self.val_loss = []", "def reset_running_stats(self):\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()", "def reset(self):\n self._samples = None\n self._frequencies = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges data from a single transaction. Snapshot is an instance of StatsEngine that contains stats for the single transaction.
def merge(self, snapshot): if not self.__settings: return self.merge_metric_stats(snapshot) self._merge_transaction_events(snapshot) self._merge_synthetics_events(snapshot) self._merge_error_events(snapshot) self._merge_error_traces(snapshot) self._merge_custom_events(snapshot) self._merge_span_events(snapshot) self._merge_sql(snapshot) self._merge_traces(snapshot)
[ "def merge_transaction(self, transaction):\n self.inputs += transaction.inputs\n self.outputs += transaction.outputs\n self.shuffle()\n self.update_totals()\n self.sign_and_update()", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def merge(data1, data2, ContainerType=pg.DataContainer, snap=0.001):\n data = ContainerType(data1)\n data.add(data2, snap)\n return data", "def get_snapshot(self, snapshot):\n return self._get(_snapshot.Snapshot, snapshot)", "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration 
>= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def __add__(self, other):\n t = deepcopy(self)\n t.merge_transaction(other)\n return t", "def gettransaction(self, txid, blockheight=None):\n if blockheight is None or txid not in self:\n tx = self.rpc.gettransaction(txid)\n # save if we don't know about this tx\n if txid not in self:\n self.add({txid: tx})\n if \"time\" not in tx:\n tx[\"time\"] = tx[\"timereceived\"]\n return tx\n tx = self[txid]\n return {\"hex\": tx[\"hex\"], \"time\": tx[\"time\"]}", "async def incoming_chaindata(session: DbSession, tx: ChainTxDb):\n upsert_chain_tx(session=session, tx=tx)\n upsert_pending_tx(session=session, tx_hash=tx.hash)", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def add_snapshot(self, snapshot):\n self.last_snapshot = snapshot", "def snapshot(self):\n if self.add_t_snap:\n utils.save_df(self.cluster.withColumn(\"t\", lit(float(self.t))),\n f\"t{self.t}\", **self.save_params)\n else:\n utils.save_df(self.cluster, f\"t{self.t}\", **self.save_params)", "def rollback(self, snapshot):\n\n if not self.__settings:\n return\n\n _logger.debug('Performing rollback of data into '\n 'subsequent harvest period. Metric data and transaction events'\n 'will be preserved and rolled into next harvest')\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot, rollback=True)\n self._merge_synthetics_events(snapshot, rollback=True)\n self._merge_error_events(snapshot)\n self._merge_custom_events(snapshot, rollback=True)\n self._merge_span_events(snapshot, rollback=True)", "def apply_transaction(self, transaction):\n vm = self.get_vm()\n return vm.apply_transaction(transaction)", "def __mapToTransaction(data: dict) -> Transaction:\n return Transaction(data[\"_id\"], data[\"amount\"], data[\"note\"], data[\"category\"], data[\"date\"].date())", "def checkout(self, version: Optional[int] = None) -> pd.DataFrame:\n # Use the last snapshot as default if no version is specified. 
Ensure\n # a given version exists in the snapshot index.\n if version is None:\n version = self.snapshots().last_snapshot().version\n elif not self.snapshots().has_version(version):\n raise ValueError('unknown version {}'.format(version))\n # The index values for the returned data frame depend on the the type\n # of key that was used for the archive. If the archive does not have a\n # primary key, the row key represents the row index value. If a primary\n # key is defined, we use the row identifier as the row index value.\n is_keyed = self.store.primary_key() is not None\n # Get dataset schema at the given version.\n columns = self.schema().at_version(version)\n colids = [c.colid for c in columns]\n # Get the row values and their position.\n rows = list()\n with self.reader() as reader:\n for row in reader:\n if row.timestamp.contains(version):\n pos, vals = row.at_version(version, colids, raise_error=False)\n rowidx = row.rowid if is_keyed else row.key.value\n rows.append((rowidx, pos, vals))\n # Sort rows in ascending order.\n rows.sort(key=lambda r: r[1])\n # Create document for the retrieved snapshot.\n data, rowindex = list(), list()\n for rowid, _, vals in rows:\n data.append(vals)\n rowindex.append(rowid)\n return pd.DataFrame(data=data, index=rowindex, columns=columns, dtype=object)", "def update():\n ts = transactions.load()\n l = ledger.load()\n\n count = len(l)\n ledger.merge_transactions(l, ts)\n count = len(l) - count\n\n if count > 0:\n config.logger.info(\"merging {} new transactions\".format(count))\n ledger.store(l)", "def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def get_snapshot(self, index):\n # Reconstruct Snapshot\n snapshot = Snapshot(\n position=numpy.vstack([self['position/x'],\n self['position/y']]))\n\n # Add other scalar and vector fields\n remaining_vectors = [v for v in self.vectors\n if v != 'position']\n for vector in remaining_vectors:\n vec_data = numpy.empty(\n shape=(self.n_dimensions,\n self.n_samples,\n self.n_snapshots))\n for dim_index in range(self.n_dimensions):\n key = vector + '/' + self.axis_labels[dim_index]\n vec_data[dim_index] = self[key][:, index]\n setattr(snapshot, vector, vec_data)\n\n for scalar in self.scalars:\n setattr(snapshot, scalar, numpy.asarray(self[scalar][:, index]))\n\n # Add properties\n setattr(snapshot, 'properties', {})\n for key, value in self['properties'].items():\n snapshot.properties[key] = value[()]\n\n return snapshot" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a "rollback" merge after a failed harvest. Snapshot is a copy of the main StatsEngine data that we attempted to harvest, but failed. Not all types of data get merged during a rollback.
def rollback(self, snapshot):

        if not self.__settings:
            return

        _logger.debug('Performing rollback of data into '
                'subsequent harvest period. Metric data and transaction events '
                'will be preserved and rolled into next harvest')

        self.merge_metric_stats(snapshot)
        self._merge_transaction_events(snapshot, rollback=True)
        self._merge_synthetics_events(snapshot, rollback=True)
        self._merge_error_events(snapshot)
        self._merge_custom_events(snapshot, rollback=True)
        self._merge_span_events(snapshot, rollback=True)
[ "def rollback(self, stage, enodes, exception):", "def revert_to_snapshot(self, context, share, snapshot):\n\n reservations = self._handle_revert_to_snapshot_quotas(\n context, share, snapshot)\n\n try:\n if share.get('has_replicas'):\n self._revert_to_replicated_snapshot(\n context, share, snapshot, reservations)\n else:\n self._revert_to_snapshot(\n context, share, snapshot, reservations)\n except Exception:\n with excutils.save_and_reraise_exception():\n if reservations:\n QUOTAS.rollback(\n context, reservations,\n share_type_id=share['instance']['share_type_id'])", "def rollback(self):\n self._state_machine.transition_to_rollback()\n for action in reversed(self._executed_actions):\n try:\n self.execute_with_retries(action, lambda a: a.rollback())\n except: # pylint: disable=bare-except\n pass # on exception, carry on with rollback of other steps\n self._state_machine.transition_to_rollback_complete()", "def test_update_fail_merge(self):\n repo = GitRepository.create(self.pkg)\n os.chdir(self.pkg)\n\n dsc = self._dsc('2.6-2')\n ok_(import_dsc(['arg0', '--pristine-tar', dsc]) == 0)\n self._check_repo_state(repo, 'master', ['master', 'upstream', 'pristine-tar'])\n\n heads = self.rem_refs(repo, self.def_branches)\n\n orig = self._orig('2.8')\n with patch('gbp.scripts.import_orig.debian_branch_merge',\n side_effect=GitRepositoryError('this is a fail merge error mock')):\n ok_(import_orig(['arg0', '--no-interactive', '--pristine-tar', orig]) == 1)\n self._check_repo_state(repo, 'master', ['master', 'upstream', 'pristine-tar'],\n tags=['debian/2.6-2', 'upstream/2.6'])\n self.check_refs(repo, heads)", "def rollback(self, session):\n pass", "def test_fail_transaction(self):\n source_wallet = self.source_user.wallets.last()\n target_wallet = self.target_user.wallets.last()\n\n source_balance_init = source_wallet.balance\n target_balance_init = target_wallet.balance\n\n data = {\n 'initial_amount': 1100,\n 'source_wallet': source_wallet,\n 'target_wallet': target_wallet,\n }\n execute_wallet_transaction(data)\n\n source_wallet.refresh_from_db()\n target_wallet.refresh_from_db()\n\n self.assertTrue(source_balance_init == source_wallet.balance)\n self.assertTrue(target_balance_init == target_wallet.balance)\n\n self.assertEqual(source_wallet.outcome_transactions.last().status, TRANSACTION_FAIL_STATUS)", "def rollback():\n\n rollback_release()", "def test_merge_fail(env, project, repo_a, repo_b, users):\n project.batch_limit = 1\n\n root_a = repo_a.make_commit(None, 'initial', None, tree={'a': 'a_0'})\n repo_a.make_ref('heads/master', root_a)\n repo_a.protect('master')\n root_b = repo_b.make_commit(None, 'initial', None, tree={'a': 'b_0'})\n repo_b.make_ref('heads/master', root_b)\n repo_b.protect('master')\n\n # first set of matched PRs\n pr1a = make_pr(repo_a, 'A', [{'a': 'a_1'}], label='do-a-thing')\n pr1b = make_pr(repo_b, 'B', [{'a': 'b_1'}], label='do-a-thing')\n\n # add a conflicting commit to B so the staging fails\n repo_b.make_commit('heads/master', 'cn', None, tree={'a': 'cn'})\n\n # and a second set of PRs which should get staged while the first set\n # fails\n pr2a = make_pr(repo_a, 'A2', [{'b': 'ok'}], label='do-b-thing')\n pr2b = make_pr(repo_b, 'B2', [{'b': 'ok'}], label='do-b-thing')\n\n env['runbot_merge.project']._check_progress()\n\n s2 = to_pr(env, pr2a) | to_pr(env, pr2b)\n st = env['runbot_merge.stagings'].search([])\n assert set(st.batch_ids.prs.ids) == set(s2.ids)\n\n failed = to_pr(env, pr1b)\n assert failed.state == 'error'\n assert pr1b.comments == [\n 
(users['reviewer'], 'hansen r+'),\n (users['user'], re_matches('^Unable to stage PR')),\n ]\n other = to_pr(env, pr1a)\n assert not other.staging_id\n assert [\n c['commit']['message']\n for c in repo_a.log('heads/staging.master')\n ] == [\n re_matches('^force rebuild'),\n 'commit_A2_00\\n\\ncloses %s#2' % repo_a.name,\n 'initial'\n ], \"dummy commit + squash-merged PR commit + root commit\"", "def rollback(self, context: 'IconScoreContext', _block_height: int, _block_hash: bytes):\n Logger.info(tag=ROLLBACK_LOG_TAG, msg=\"rollback() start\")\n\n self.prep_address_converter: 'PRepAddressConverter' = context.storage.meta.get_prep_address_converter(context)\n\n self.preps = self._load_preps(context)\n self.term = self._load_term(context)\n\n Logger.info(tag=ROLLBACK_LOG_TAG, msg=f\"rollback() end: {self.term}\")", "def rollback(self):\n self.__target.rollback()", "def rollback():\n get_session().rollback()", "def rollback(commit_id):\r\n require('settings', provided_by=[production, staging])\r\n require('branch', provided_by=[stable, master, branch])\r\n \r\n maintenance_up()\r\n checkout_latest()\r\n git_reset(commit_id)\r\n gzip_assets()\r\n deploy_to_s3()\r\n maintenance_down()", "def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. 
%s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def rollback(commit_id):\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n maintenance_up()\n checkout_latest()\n git_reset(commit_id)\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def _revert_to_snapshot(self, context, share, snapshot, reservations):\n\n # Set status of share to 'reverting'\n self.db.share_update(\n context, snapshot['share_id'],\n {'status': constants.STATUS_REVERTING})\n\n # Set status of snapshot to 'restoring'\n self.db.share_snapshot_update(\n context, snapshot['id'],\n {'status': constants.STATUS_RESTORING})\n\n # Send revert API to share host\n self.share_rpcapi.revert_to_snapshot(\n context, share, snapshot, share['instance']['host'], reservations)", "def rollback(self, schema: ArchiveSchema, writer: ArchiveFileWriter, version: int):\n # Get an updated shapshot listing.\n snapshots = self.snapshots.rollback(version=version)\n # Materialize the modified archive.\n self._write(schema=schema, writer=writer, snapshots=snapshots)\n # Update the cached objects\n self.schema = schema\n self.snapshots = snapshots", "def rollback(self, cr, uid, ids, context = {}):\n main_arch_obj = self.pool.get('hr.payroll.main.archive')\n loan_arch_obj = self.pool.get('hr.loan.archive')\n archive_obj = self.pool.get('hr.allowance.deduction.archive')\n emp_loan_obj = self.pool.get('hr.employee.loan')\n data = self.get_data(cr, uid, ids, context = context)\n emp_loan_ids=[]\n if not data['archive_ids']:\n raise osv.except_osv(_('Error'), _('No Such %s In The %sth Month Year Of %s To Be Rollbacked')\n % (data['type'], data['month'], data['year']))\n loan_arch_ids = loan_arch_obj.search(cr, uid, [('main_arch_id','in',data['archive_ids'])])\n if loan_arch_ids:\n emp_loan_ids=[rec.loan_id.id for rec in loan_arch_obj.browse(cr, uid,loan_arch_ids, context = context) ]\n if data['type'] == 'salary':\n testq = time.time()\n main_arch_obj.unlink(cr, uid, data['archive_ids'])\n print \" Salary Time Rollbacking ###########################\", time.time()-testq\n else:\n testq = time.time()\n archive_obj.unlink(cr, uid, data['addendums_arch_ids'])\n print \" Salary Time Rollbacking ###########################\", time.time()-testq\n for e in main_arch_obj.browse(cr, uid, data['archive_ids'], context = context) :\n if not e.allow_deduct_ids:\n main_arch_obj.unlink(cr, uid, e.id)\n if emp_loan_ids:\n emp_loan_comnt = emp_loan_obj.read(cr, uid, emp_loan_ids, ['comments','end_date'])\n for rec in emp_loan_comnt:\n x=emp_loan_obj.write(cr,uid,emp_loan_ids,{'comments':rec['comments'] and rec['comments']+'' or '' },context)\n print \" Salary Time Rollbacking emp_loan_ids###########################\", time.time()-testq\n \n if context.get('salary_batch_id'):\n 
self.pool.get('hr.salary.batch').write(cr,uid,[context.get('salary_batch_id')],{'batch_total':0,} ) \n self.write(cr, uid, ids, { 'state': 'draft'}, context=context)\n return {}", "def rollbackSnapshotLXCContainer(self,node,vmid,snapname):\n post_data = {}\n data = self.connect('post','nodes/%s/lxc/%s/snapshot/%s/rollback' % (node,vmid,snapname), post_data) \n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }