query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | negatives (list, 19 to 20 items) | metadata (dict) |
---|---|---|---|
Assert all quit jobs are sent to collaborators.
|
def all_quit_jobs_sent(self):
return set(self.quit_job_sent_to) == set(self.authorized_cols)
|
[
"def test_all(self):\n self.assertEqual(twisted.mail.pop3client.__all__, [])",
"def testQuitAll():\r\n allScripts = BrewPiProcesses()\r\n allScripts.update()\r\n print (\"Running instances of BrewPi before asking them to quit:\")\r\n pprint.pprint(allScripts)\r\n allScripts.quitAll()\r\n sleep(2)\r\n allScripts.update()\r\n print (\"Running instances of BrewPi after asking them to quit:\")\r\n pprint.pprint(allScripts)",
"def test_watchers_are_finished(self):\n from cassandra.io.libevreactor import _global_loop\n with patch.object(_global_loop, \"_thread\"),\\\n patch.object(_global_loop, \"notify\"):\n\n self.make_connection()\n\n # We have to make a copy because the connections shouldn't\n # be alive when we verify them\n live_connections = set(_global_loop._live_conns)\n\n # This simulates the process ending without cluster.shutdown()\n # being called, then with atexit _cleanup for libevreactor would\n # be called\n libev__cleanup(_global_loop)\n for conn in live_connections:\n self.assertTrue(conn._write_watcher.stop.mock_calls)\n self.assertTrue(conn._read_watcher.stop.mock_calls)\n\n _global_loop._shutdown = False",
"def test_stop_event_quit(self):\n\n class TestUser(User):\n @task\n def my_task(self):\n pass\n\n with mock.patch(\"locust.rpc.rpc.Server\", mocked_rpc()) as server:\n master = self.get_runner(user_classes=[TestUser])\n\n @self.environment.events.test_stopping.add_listener\n def on_test_stopping(*_, **__):\n self.runner_stopping = True\n\n @self.environment.events.test_stop.add_listener\n def on_test_stop(*_, **__):\n self.runner_stopped = True\n\n for i in range(5):\n server.mocked_send(Message(\"client_ready\", __version__, \"fake_client%i\" % i))\n\n master.start(7, 7)\n self.assertEqual(10, len(server.outbox))\n master.quit()\n self.assertTrue(self.runner_stopping)\n self.assertTrue(self.runner_stopped)",
"def test_quit_cli(self):\n storage.delete_all()\n self.assertTrue(self.CLI.do_quit(self.CLI))",
"def test_someSuccesses(self):\n ds = self.makeDeliveries(u'test1@example.com', u'test2@example.com')\n ds[0].mailSent(None, self.scheduler)\n self.assertEqual(set(self.message.iterStatuses()),\n set([exmess.OUTBOX_STATUS, exmess.UNREAD_STATUS,\n exmess.SENT_STATUS]))",
"def test_unschedule_all(friends):\n dispatcher, scheduler, action = friends()\n assert dispatcher.job_count() == 0\n\n dispatcher.add_action(\"foo\", action)\n dispatcher.add_scheduler(\"bar\", scheduler)\n dispatcher.schedule_action(\"bar\", \"foo\")\n assert dispatcher.job_count() == 1\n assert dispatcher.get_scheduled_action_count() == 1\n\n dispatcher.clear_jobs()\n assert dispatcher.job_count() == 0\n\n dispatcher.add_action(\"foo2\", action)\n dispatcher.schedule_action(\"bar\", \"foo2\")\n assert dispatcher.get_scheduled_action_count() == 2\n\n dispatcher.unschedule_all_schedulers()\n assert dispatcher.job_count() == 0\n assert dispatcher.get_scheduled_action_count() == 0",
"def test_split_circuits(self):\n max_circs = self.fake_api_backend.configuration().max_experiments\n\n circs = []\n for _ in range(max_circs+2):\n circs.append(self._qc)\n job_set = self._jm.run(circs, backend=self.fake_api_backend)\n job_set.results()\n statuses = job_set.statuses()\n\n self.assertEqual(len(statuses), 2)\n self.assertTrue(all(s is JobStatus.DONE for s in statuses))\n self.assertTrue(len(job_set.jobs()), 2)",
"def test_process_remove_client_schedule(self):\n error = self.process_remove_client_schedule()\n for err in error: assert err == 0",
"def test_list_webhook_jobs(self):\n pass",
"def test_process_delete_schedule_server(self):\n error, out = self.process_delete_schedule_server()\n for err in error: assert err == 0",
"def test_process_delete_schedule(self):\n error, out = self.process_delete_schedule()\n for err in error: assert err == 0",
"def test_join(self):\n self.parent.makedirs()\n\n reactor = CountingReactor([])\n started = self.successResultOf(self.pool.start(reactor))\n joining = Deferred.fromCoroutine(started.join())\n self.assertNoResult(joining)\n for w in reactor._workers:\n assert_that(w.transport._closed, contains(_WORKER_AMP_STDIN))\n for fd in w.transport._closed:\n w.childConnectionLost(fd)\n for f in [w.processExited, w.processEnded]:\n f(Failure(ProcessDone(0)))\n assert_that(self.successResultOf(joining), none())\n assert_that(started.testLog.closed, equal_to(True))\n assert_that(started.testDirLock.locked, equal_to(False))",
"async def test_join_and_leave_all_subscription(self):\n # Arrange\n communicator = WebsocketCommunicator(application, self.url)\n connected, subprotocol = await communicator.connect()\n # Act 1 (Subscribe)\n for category in self.categories:\n msg = {\n \"option\": \"subscribe\",\n \"category\": category,\n \"csc\": \"all\",\n \"salindex\": \"all\",\n \"stream\": \"all\",\n }\n await communicator.send_json_to(msg)\n # Assert 1\n response = await communicator.receive_json_from()\n assert response[\n \"data\"\n ] == \"Successfully subscribed to {}-all-all-all\".format(category)\n # Act 2 (Unsubscribe)\n for category in self.categories:\n msg = {\n \"option\": \"unsubscribe\",\n \"category\": category,\n \"csc\": \"all\",\n \"salindex\": \"all\",\n \"stream\": \"all\",\n }\n await communicator.send_json_to(msg)\n # Assert 2\n response = await communicator.receive_json_from()\n assert response[\n \"data\"\n ] == \"Successfully unsubscribed to {}-all-all-all\".format(category)\n await communicator.disconnect()",
"def test_process_remove_schedule_client(self):\n error = self.process_remove_schedule_client()\n for err in error: assert err == 0",
"def test_zmq_does_not_crash_worker(caplog):\n procedure = RandomProcedure()\n file = tempfile.mktemp()\n results = Results(procedure, file)\n # If we define a port here we get ZMQ communication\n # if cloudpickle is installed\n worker = Worker(results, port=5888, log_level=logging.DEBUG)\n worker.start()\n worker.join(timeout=20.0) # give it enough time to finish the procedure\n assert procedure.status == procedure.FINISHED\n del worker # make sure to clean up, reduce the possibility of test\n # dependencies via left-over sockets",
"def test_stop_on_exit(self):\n atexit = []\n reactor = FakeReactor()\n s = EventLoop(lambda: reactor, lambda f, *args: atexit.append((f, args)))\n s.setup()\n self.assertEqual(len(atexit), 2)\n self.assertFalse(reactor.stopping)\n f, args = atexit[0]\n self.assertEqual(f, reactor.callFromThread)\n self.assertEqual(args, (reactor.stop,))\n f(*args)\n self.assertTrue(reactor.stopping)\n f, args = atexit[1]\n self.assertEqual(f, _store.log_errors)\n self.assertEqual(args, ())\n f(*args) # make sure it doesn't throw an exception",
"def run(self):\r\n # can try: except: here to catch errors and display more verbose error messages.\r\n _start_time = time.time()\r\n for test in self._tests:\r\n self._currently_running = test.__name__\r\n self._messageHandler.setContext(self._currently_running)\r\n\r\n ColorPrint.warn(\" RUNS \", end=\"\", background=True)\r\n ColorPrint.white(\" {}\".format(self._currently_running), end=\"\\r\")\r\n \r\n self.beforeEach()\r\n try:\r\n test()\r\n except Exception as error:\r\n # ExpectationFailure is raised because Expect doesn't know if\r\n # it is running in a testsuite.\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n tracebackFormatted = traceback.format_tb(exc_traceback)\r\n if not isinstance(error, ExpectationFailure):\r\n self._messageHandler.queueError(error, tracebackFormatted)\r\n\r\n ColorPrint.fail(\" FAIL \",end=\"\", background=True)\r\n ColorPrint.white(\" {}\".format(self._currently_running))\r\n self._status[test.__name__] = \"failed\"\r\n else:\r\n ColorPrint.green(\" PASS \",end=\"\", background=True)\r\n ColorPrint.green(\" {}\".format(self._currently_running))\r\n self._status[test.__name__] = \"passed\"\r\n self.afterEach()\r\n self._run_time = round(time.time() - _start_time, 2)\r\n self._messageHandler.popAll()\r\n print()\r\n ColorPrint.info(\"Ran all tests in {} seconds\".format(self._run_time))\r\n if any(map(lambda key: self._status[key] == \"failed\", self._status)): \r\n sys.exit(not self.exit_gracefully) # 0 if should exit gracefully, 1 otherwise.\r",
"def _check_jobs(self):\n testmode = self.am_getOption(\"TestMode\", False)\n simudb = SimuInterface(create_connection(testmode=testmode))\n try:\n simusdict = simudb.get_runs_with_status_in_group_with_status(status=[\"new\"],\n gstat=[\"new\", \"submitting\"])\n except:\n return S_ERROR(\"Couldn't get the simu dict\")\n simudb.close_session()\n return S_OK(len(simusdict.keys()))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
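The metadata cell above declares a triplet objective pairing each query with its document against the listed negatives. Below is a minimal, hypothetical sketch (plain PyTorch; the `encode` function and the dict-style field access are assumptions for illustration, not part of the dataset) of how one such row could be scored with an InfoNCE-style contrastive loss:

```python
import torch
import torch.nn.functional as F

def info_nce_loss(encode, row, temperature=0.05):
    """Score the positive `document` against the `negatives` for one `query` row."""
    query_emb = encode(row["query"])                          # assumed shape: (d,)
    candidates = [row["document"]] + list(row["negatives"])
    cand_embs = torch.stack([encode(c) for c in candidates])  # (1 + n_neg, d)

    # Cosine similarity between the query and every candidate, scaled by temperature
    sims = F.cosine_similarity(query_emb.unsqueeze(0), cand_embs) / temperature

    # The positive document is always placed at index 0 of `candidates`
    target = torch.zeros(1, dtype=torch.long)
    return F.cross_entropy(sims.unsqueeze(0), target)
```

Any text or code embedding model can stand in for `encode`; the only structural assumption is that the positive document occupies index 0 among the candidates, matching the (query, document, negatives) triplet named in the metadata.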
RPC called by a collaborator to determine which tasks to perform.
|
def get_tasks(self, collaborator_name):
self.logger.debug(
f'Aggregator GetTasks function reached from collaborator {collaborator_name}...'
)
# first, if it is time to quit, inform the collaborator
if self._time_to_quit():
self.logger.info(f'Sending signal to collaborator {collaborator_name} to shutdown...')
self.quit_job_sent_to.append(collaborator_name)
tasks = None
sleep_time = 0
time_to_quit = True
return tasks, self.round_number, sleep_time, time_to_quit
time_to_quit = False
# otherwise, get the tasks from our task assigner
tasks = self.assigner.get_tasks_for_collaborator(
collaborator_name,
self.round_number) # fancy task assigners may want aggregator state
# if no tasks, tell the collaborator to sleep
if len(tasks) == 0:
tasks = None
sleep_time = self._get_sleep_time()
return tasks, self.round_number, sleep_time, time_to_quit
# if we do have tasks, remove any that we already have results for
tasks = [
t for t in tasks if not self._collaborator_task_completed(
collaborator_name, t, self.round_number)
]
# Do the check again because it's possible that all tasks have
# been completed
if len(tasks) == 0:
tasks = None
sleep_time = self._get_sleep_time()
return tasks, self.round_number, sleep_time, time_to_quit
self.logger.info(
f'Sending tasks to collaborator {collaborator_name} for round {self.round_number}'
)
sleep_time = 0
return tasks, self.round_number, sleep_time, time_to_quit
|
[
"def _service_task(self):\r\n pass",
"def get_task(self, task_name):",
"def task_list(self):\n self.developed_model_version_id = new_models(self.old_developed_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n decomp_step_id=self.decomp_step_id,\n desc=self.description + ' previous version {}'.\n format(self.old_developed_model_version_id))[0]\n self.developed_task = CODEmTask(model_version_id=self.developed_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n parameter_dict=self.codem_params,\n cores=self.num_cores)\n self.global_model_version_id = new_models(self.old_global_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n decomp_step_id=self.decomp_step_id,\n desc=self.description + ' previous version {}'.\n format(self.old_global_model_version_id))[0]\n self.global_task = CODEmTask(model_version_id=self.global_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n parameter_dict=self.codem_params,\n cores=self.num_cores)\n self.hybrid_task = HybridTask(user=self.user,\n developed_model_version_id=self.developed_model_version_id,\n global_model_version_id=self.global_model_version_id,\n conn_def=self.conn_def,\n upstream_tasks=[self.developed_task,\n self.global_task],\n parameter_dict=self.hybridizer_params)\n return [self.developed_task, self.global_task, self.hybrid_task]",
"def test_get_subtasks_for_task(self):\n pass",
"def executor_cli():",
"def get_tasks(self, job_params, marker=None):\n raise NotImplementedError()",
"def test_get_task_instances(self):\n pass",
"async def tasks_all(self, ctx):\n if isinstance(ctx.channel, discord.TextChannel):\n await ctx.send(\"This is a long list. I'm going to send it to your DM. To view items \"\n \"in the Council Chat, please request them individually (`++tasks suggestions`).\")\n # Suggestions\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Suggestions!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Suggestions\", color=discord.Color.blurple())\n flag = 0\n for row in values:\n if len(row) < 9:\n embed.add_field(name=f\"Suggestion from {row[1]}\\n{row[7]}\",\n value=f\"{row[3][:500]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Council Nominations\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Council!A2:J\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Nominations\", color=discord.Color.dark_gold())\n for row in values:\n if row[8] == \"\":\n embed.add_field(name=f\"Council Nomination for {row[3]}\\n{row[9]}\",\n value=f\"Submitted by {row[1]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Verification Requests\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Verification!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Verification Requests\", color=discord.Color.dark_blue())\n for row in values:\n if len(row) < 9 or row[8] in (\"1\", \"2\", \"3\", \"4\"):\n status = \"has not been addressed\"\n try:\n if row[8] == \"1\": status = \" is awaiting a scout\"\n if row[8] == \"2\": status = \" is currently being scouted\"\n if row[8] == \"3\": status = \" is awaiting the post-scout survey\"\n if row[8] == \"4\": status = \" is awaiting a decision by Council\"\n except:\n self.bot.logger.debug(\"row is shorter than 9\")\n embed.add_field(name=f\"Verification for {row[1]} {status}.\\n{row[7]}\",\n value=f\"Leader: {row[3]}\\nDated {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks update <Task ID> to change the status.\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Other Submissions\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Other!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Other Items\", color=discord.Color.gold())\n for row in values:\n if len(row) < 9:\n if len(row[6]) > 1:\n assigned_to = f\"Assigned to: {self.guild.get_member(int(row[6])).display_name}\"\n else:\n assigned_to = \"Unassigned\"\n embed.add_field(name=f\"Other Comment from {row[1]}\\n{row[7]}\",\n value=(f\"{row[3][:500]}\\n{assigned_to}\\n\"\n f\"Dated {row[0]}\"),\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n # Tasks (Individual Action Items)\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Tasks!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Action Items\", color=discord.Color.dark_magenta())\n for row in values:\n if len(row) < 9:\n if len(row[6]) > 1:\n assigned_to = f\"Assigned to: 
{self.guild.get_member(int(row[6])).display_name}\"\n else:\n assigned_to = \"Unassigned\"\n embed.add_field(name=f\"{assigned_to}\\n{row[7]}\",\n value=f\"{row[1]}\\nDated: {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n flag = 1\n await ctx.author.send(embed=embed)\n if flag == 0:\n await ctx.send(\"No incomplete tasks at this time! Well done!\")",
"def test_get_tasks_for_project(self):\n pass",
"def supervisor_tasks(self):\n try:\n option = {\n \"1\": (\"Show Complaint\", Supervisor.show_complaint),\n \"2\": (\"Create Reports\", Supervisor.create_report),\n \"3\": (\"Show Reports\", Supervisor.show_reports)\n }\n ans = input(\"Choose:\\n\"\n \"1.Show Complaint.\\n\"\n \"2.Create Reports.\\n\"\n \"3.Show Reports.\\n\")\n\n option.get(ans)[1](conn)\n except Exception as e:\n print(\"Invalid Choice. Please select again!\")\n Supervisor.supervisor_tasks(self)",
"def __tasks__(self):\n\n if self.project_level or self.pipeline_type.lower() == \"ss2\":\n return []\n\n workflow_metadata = self.__metadata__()\n return format_map.get_workflow_tasks(workflow_metadata)",
"def execute(self):\n result = getattr(self, self.task)()\n return result",
"def main():\n\n from sys import argv\n opts = getopts(argv)\n\n if \"-t\" in opts:\n task_name = opts[\"-t\"]\n else:\n print(\"Error: must specify -t\")\n return\n\n task_map = {\n \"coin_list\": import_coin_list,\n \"historical\": import_historical_data,\n \"current\": import_current_data,\n \"twitter\": import_twitter_data,\n \"analysis\": analysis_tasks,\n \"cc_stats\": cc_stats_task,\n \"db_stats\": db_stats,\n \"stocktwits\": import_stocktwits\n }\n\n if task_name not in task_map:\n print(\"Error: task {} should be one of {}\".format(task_name, list(task_map.keys())))\n return\n\n tasks.init()\n\n task_map[task_name]()",
"def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:\n raise NotImplementedError()",
"def eval_tasks(self):\n return self._eval_tasks",
"def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)",
"def main():\n task = Task()\n task.run_task()",
"def get_executor(self):\n pass",
"def test_get_tasks_for_section(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if the collaborator has completed the task for the round. The aggregator doesn't actually know which tensors should be sent from the collaborator, so it must rely specifically on the presence of previous results.
|
def _collaborator_task_completed(self, collaborator, task_name, round_num):
task_key = TaskResultKey(task_name, collaborator, round_num)
return task_key in self.collaborator_tasks_results
|
[
"def _is_round_done(self):\n tasks_for_round = self.assigner.get_all_tasks_for_round(\n self.round_number\n )\n\n return all([self._is_task_done(t) for t in tasks_for_round])",
"def mcmc_done(self):\n if self.mcmc_results is not None:\n return True\n return False",
"def has_completed_every_train(self):\n return len(self.trains_queue) == 0 and all([train.has_finished() for train in self.trains])",
"def _end_of_round_check(self):\n if not self._is_round_done():\n return\n\n # Compute all validation related metrics\n all_tasks = self.assigner.get_all_tasks_for_round(self.round_number)\n for task_name in all_tasks:\n self._compute_validation_related_task_metrics(task_name)\n\n # Once all of the task results have been processed\n # Increment the round number\n self.round_number += 1\n\n # Save the latest model\n self.logger.info(f'Saving round {self.round_number} model...')\n self._save_model(self.round_number, self.last_state_path)\n\n # TODO This needs to be fixed!\n if self._time_to_quit():\n self.logger.info('Experiment Completed. Cleaning up...')\n else:\n self.logger.info(f'Starting round {self.round_number}...')\n\n # Cleaning tensor db\n self.tensor_db.clean_up(self.db_store_rounds)",
"async def process_epoch(self) -> bool:\n\n # Any calculations done within the current epoch would be included here.\n # Also sending of any result messages (other than Status message) would be included here.\n return True # only if the component is done for the current epoch\n # return False # if the component still has things to do within the current epoch",
"def is_finished(self):\n return self.current_element == len(self.work_data)",
"def training_complete(self):\n return self.current_task >= len(self.tasks)",
"def is_complete(self) -> bool:\n node_ids = set(self.graph.nx_graph_flat().nodes)\n return self.has_error() or all((k in self.executed for k in node_ids))",
"def has_finished(grid):\n\n if not get_cell_count(grid) and grid.generation > 0:\n return True\n\n return False",
"def is_finished( self ):\n output = subprocess.getoutput( \"squeue -h -j \" + self.job_num )\n return not output",
"def is_done(self, jobdesc=None):\n\n # Cache the result so we can return immediately without hitting\n # any of the Amazon APIs\n if self._is_done:\n return True\n iter_no = self._get_last_process_step_iter_no(jobdesc=jobdesc)\n if iter_no < 0:\n return False\n i = self._last_process_step_iter_no\n\n while i < iter_no:\n i += 1\n outdir = self._get_default_outdir('process', iter_no=i)\n keyname = self._get_keyname(outdir, 'part-00000')\n\n bucket = self._s3_conn.get_bucket(self._s3_bucket)\n key = bucket.get_key(keyname)\n contents = ''\n\n if key is not None:\n contents = key.next() # get first chunk of the output file\n if contents.startswith('FinalRank'):\n self._is_done = True # cache result\n break\n\n self._last_process_step_iter_no = i\n\n return self._is_done",
"def is_done(self) -> bool:\n n_completed = 0\n final_depth = self._get_depth_of(self.fidelities[-1])\n for trial in self.lineages.get_trials_at_depth(final_depth):\n n_completed += int(trial.status == \"completed\")\n\n return n_completed >= self.population_size",
"def finished(self):\n return self.board == self.goal",
"def check_finished(self):\n if self.max_iterations == -1:\n return False\n return self.iterations >= self.max_iterations",
"def is_operation_finished(self):\n return self._is_operation_finished",
"def is_done(self):\n return not (self.patrn_bfs_queue and self.sub_bfs_queue)",
"def done(self):\n return bool(self.output)",
"def done(self):\n return hasattr(self, \"_duration\")",
"def finished(self):\n # type: () -> bool\n return self._status is None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RPC called by a collaborator. Transmits the collaborator's task results to the aggregator.
|
def send_local_task_results(self, collaborator_name, round_number, task_name,
data_size, named_tensors):
self.logger.info(
f'Collaborator {collaborator_name} is sending task results '
f'for {task_name}, round {round_number}'
)
task_key = TaskResultKey(task_name, collaborator_name, round_number)
# we mustn't have results already
if self._collaborator_task_completed(
collaborator_name, task_name, round_number
):
raise ValueError(
f'Aggregator already has task results from collaborator {collaborator_name}'
f' for task {task_key}'
)
# initialize the list of tensors that go with this task
# Setting these incrementally is leading to missing values
task_results = []
# go through the tensors and add them to the tensor dictionary and the
# task dictionary
for named_tensor in named_tensors:
# sanity check that this tensor has been updated
if named_tensor.round_number != round_number:
raise ValueError(
f'Collaborator {collaborator_name} is reporting results for the wrong round.'
f' Exiting...'
)
# quite a bit happens in here, including decompression, delta
# handling, etc...
tensor_key, nparray = self._process_named_tensor(
named_tensor, collaborator_name
)
task_results.append(tensor_key)
# By giving task_key it's own weight, we can support different
# training/validation weights
# As well as eventually supporting weights that change by round
# (if more data is added)
self.collaborator_task_weight[task_key] = data_size
self.collaborator_tasks_results[task_key] = task_results
self._end_of_task_check(task_name)
|
[
"def push_result(self, task_request, task_response):",
"def _task_submitter_impl(self) -> None:\n log.debug(\n \"%s: task submission thread started (%s)\", self, threading.get_ident()\n )\n to_send = self._tasks_to_send # cache lookup\n\n # Alias types -- this awkward typing is all about the dict we use\n # internally to make sure we appropriately group tasks for upstream\n # submission. For example, if the user submitted to two different\n # endpoints, we separate the tasks by the dictionary key.\n class SubmitGroup(t.NamedTuple):\n task_group_uuid: uuid.UUID | None\n endpoint_uuid: uuid.UUID\n user_endpoint_config: str\n\n SubmitGroupFutures = t.Dict[\n SubmitGroup,\n t.List[ComputeFuture],\n ]\n SubmitGroupTasks = t.Dict[\n SubmitGroup,\n t.List[_TaskSubmissionInfo],\n ]\n\n try:\n fut: ComputeFuture | None = ComputeFuture() # just start the loop; please\n while fut is not None:\n futs: SubmitGroupFutures = defaultdict(list)\n tasks: SubmitGroupTasks = defaultdict(list)\n task_count = 0\n try:\n fut, task = to_send.get() # Block; wait for first result ...\n task_count += 1\n bs = max(1, self.batch_size) # May have changed while waiting\n while task is not None:\n assert fut is not None # Come on mypy; contextually clear!\n submit_group = SubmitGroup(\n task.task_group_uuid,\n task.endpoint_uuid,\n # dict type is unhashable\n json.dumps(task.user_endpoint_config, sort_keys=True),\n )\n tasks[submit_group].append(task)\n futs[submit_group].append(fut)\n if any(len(tl) >= bs for tl in tasks.values()):\n break\n fut, task = to_send.get(block=False) # ... don't block again\n task_count += 1\n except queue.Empty:\n pass\n\n if not tasks:\n continue\n\n for submit_group, task_list in tasks.items():\n fut_list = futs[submit_group]\n\n tg_uuid, ep_uuid, uep_config = submit_group\n uep_config = json.loads(uep_config)\n # Needed for mypy\n assert uep_config is None or isinstance(uep_config, dict)\n log.info(\n f\"Submitting tasks for Task Group {tg_uuid} to\"\n f\" Endpoint {ep_uuid}: {len(task_list):,}\"\n )\n\n self._submit_tasks(\n tg_uuid, ep_uuid, uep_config, fut_list, task_list\n )\n\n to_watch = [f for f in fut_list if f.task_id and not f.done()]\n if not to_watch:\n continue\n\n with self._shutdown_lock:\n if self._stopped:\n continue\n\n if not (\n self._result_watcher and self._result_watcher.is_alive()\n ):\n # Don't initialize the result watcher unless at least\n # one batch has been sent\n self._result_watcher = _ResultWatcher(self)\n self._result_watcher.start()\n try:\n self._result_watcher.watch_for_task_results(to_watch)\n except self._result_watcher.__class__.ShuttingDownError:\n log.debug(\"Waiting for previous ResultWatcher to shutdown\")\n self._result_watcher.join()\n self._result_watcher = _ResultWatcher(self)\n self._result_watcher.start()\n self._result_watcher.watch_for_task_results(to_watch)\n\n # important to clear futures; else a legitimately early-shutdown\n # request (e.g., __exit__()) can cancel these (finally block,\n # below) before the result comes back, even though _result_watcher\n # is already watching them.\n futs.clear()\n\n while task_count:\n task_count -= 1\n to_send.task_done()\n\n except Exception as exc:\n self._stopped = True\n self._stopped_in_error = True\n log.debug(\n \"%s: task submission thread encountered error ([%s] %s)\",\n self,\n exc.__class__.__name__,\n exc,\n )\n\n if self._shutdown_lock.acquire(blocking=False):\n self.shutdown(wait=False, cancel_futures=True)\n self._shutdown_lock.release()\n\n log.debug(\"%s: task submission thread dies\", self)\n raise\n finally:\n if sys.exc_info() != 
(None, None, None):\n time.sleep(0.1) # give any in-flight Futures a chance to be .put() ...\n while not self._tasks_to_send.empty():\n fut, _task = self._tasks_to_send.get()\n if fut:\n fut.cancel()\n fut.set_running_or_notify_cancel()\n try:\n while True:\n self._tasks_to_send.task_done()\n except ValueError:\n pass\n log.debug(\"%s: task submission thread complete\", self)",
"def submit(self):\n \n # TODO: send job to scheduler ",
"def _process_workers_result(self, tasks, workers_result):\n raise NotImplementedError",
"def get_tasks(self, collaborator_name):\n self.logger.debug(\n f'Aggregator GetTasks function reached from collaborator {collaborator_name}...'\n )\n\n # first, if it is time to quit, inform the collaborator\n if self._time_to_quit():\n self.logger.info(f'Sending signal to collaborator {collaborator_name} to shutdown...')\n self.quit_job_sent_to.append(collaborator_name)\n\n tasks = None\n sleep_time = 0\n time_to_quit = True\n\n return tasks, self.round_number, sleep_time, time_to_quit\n\n time_to_quit = False\n\n # otherwise, get the tasks from our task assigner\n tasks = self.assigner.get_tasks_for_collaborator(\n collaborator_name,\n self.round_number) # fancy task assigners may want aggregator state\n\n # if no tasks, tell the collaborator to sleep\n if len(tasks) == 0:\n tasks = None\n sleep_time = self._get_sleep_time()\n\n return tasks, self.round_number, sleep_time, time_to_quit\n\n # if we do have tasks, remove any that we already have results for\n tasks = [\n t for t in tasks if not self._collaborator_task_completed(\n collaborator_name, t, self.round_number)\n ]\n\n # Do the check again because it's possible that all tasks have\n # been completed\n if len(tasks) == 0:\n tasks = None\n sleep_time = self._get_sleep_time()\n\n return tasks, self.round_number, sleep_time, time_to_quit\n\n self.logger.info(\n f'Sending tasks to collaborator {collaborator_name} for round {self.round_number}'\n )\n sleep_time = 0\n\n return tasks, self.round_number, sleep_time, time_to_quit",
"def get_results(self):\n for t in self.task:\n print t.get()",
"async def task_subprocess_callback(self, request_id, worker_id):\n try:\n results = await self.task_subprocess_results[request_id].get()\n await self.rpc_server['manager'][f'add_task_subprocess_results_{worker_id}'](request_id, results)\n return f\"added results for request_id {request_id} to worker_id {worker_id}\"\n\n except Exception as e:\n if not isinstance(e, asyncio.CancelledError):\n self.log.exception(f\"error with task_subprocess_callback for request_id: {request_id}\")",
"def execute(self):\n result = getattr(self, self.task)()\n return result",
"def _ComputationWorkThread(self, withVideo):\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/individual'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/individual')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/users'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/users')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/videos'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/videos')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/byAge'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/byAge')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/total'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/total')\n vmax = 0\n step = 0.03\n\n self.userManager.StoreUserStats(PATH_TO_STATISTIC_RESULTS +\n '/total/users')\n\n pool = ProcessingPool()\n for resultId in self.resultsContainers:\n rc = self.resultsContainers[resultId]\n userId = rc.user.uid\n videoId = rc.videoId\n rc.user.ParseFormAnswers()\n sex = rc.user.sex\n age = rc.user.age\n\n self.resultsByUser[userId].append(\n rc)\n self.resultsByVideo[videoId].append(\n rc)\n self.resultsByAge[age - age % self.ageStep].append(\n rc)\n self.resultsBySex[sex].append(\n rc)\n # vmax = \\\n # max(vmax,\n # self.resultsById[resultId].positionMatrix.max()\n # )\n\n print('\\r\\033[2KProcess individual results')\n self.PrintProgress()\n def WorkerResults(step, rc):\n if rc.isNew:\n processedResult = rc.GetProcessedResult(step)\n processedResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/individual/{}'.format(rc.resultId),\n vmax=None\n )\n processedResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/individual/{}.txt'.format(rc.resultId)\n )\n return rc\n async_result = [\n pool.apipe(\n WorkerResults,\n step, self.resultsContainers[resultId]\n ) for resultId in self.resultsContainers\n ]\n for r in async_result:\n rc = r.get()\n self.resultsContainers[rc.resultId] = rc\n self.progressBar['value'] += 1\n self.PrintProgress()\n aggrUserResults = dict()\n aggrVideoResults = dict()\n aggrAgeResults = dict()\n print('\\r\\033[2KProcess results by user')\n self.PrintProgress()\n def WorkerUsers(resultsByUser, userId, step):\n aggSize = len(resultsByUser)\n if aggSize > 0:\n dumpPath = \\\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}.dump'.format(userId)\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByUser)\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}'.format(userId),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}_vision'.format(userId)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}.txt'.format(userId)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/users/uid-{}_orthoDist.txt'.format(userId)\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n async_result = [\n pool.apipe(\n WorkerUsers,\n self.resultsByUser[userId], userId, step\n ) for userId in self.resultsByUser\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results by age')\n self.PrintProgress()\n def WorkerAge(resultsByAge, ageStep, age, step):\n aggSize = len(resultsByAge)\n if aggSize > 0:\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}.dump'.format(\n age, age + ageStep\n )\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByAge)\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}'.format(\n age, age + 
ageStep),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}_vision'.format(\n age, age + ageStep)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}.txt'.format(\n age, age + ageStep)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/byAge/{}_{}_orthoDist.txt'.format(age, age + ageStep)\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n\n async_result = [\n pool.apipe(\n WorkerAge,\n self.resultsByAge[age], self.ageStep, age, step\n ) for age in self.resultsByAge\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results by video')\n self.PrintProgress()\n def WorkerVideo(resultsByVideo, videoId, step, withVideo):\n aggSize = len(resultsByVideo)\n if aggSize > 0:\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/videos/{}.dump'.format(videoId)\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByVideo)\n aggResult.StoreVisionDistance(PATH_TO_STATISTIC_RESULTS + \\\n '/videos/'\n '{}_visionDistance'.format(\n videoId))\n # DEBUG\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}'.format(videoId),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}_vision'.format(videoId)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}.txt'.format(videoId)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/videos/{}_orthoDist.txt'.format(videoId)\n )\n for segmentSize in [1, 2, 3]:\n aggResult.StoreAngularVelocityPerSegment(\n segmentSize=segmentSize,\n filePath=PATH_TO_STATISTIC_RESULTS+'/videos/' +\n '{}_angVelPerSegment_{}s.txt'.format(videoId,\n segmentSize)\n )\n if withVideo:\n # aggResult.WriteVideo(\n aggResult.WriteVideoVision(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}.mkv'.format(videoId),\n fps=5,\n segmentSize=1/5,\n widthVideo=960,\n heightVideo=480,\n widthEqui=100,\n heightEqui=50,\n horizontalFoVAngle=110,\n verticalFoVAngle=90\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n\n async_result = [\n pool.apipe(\n WorkerVideo,\n self.resultsByVideo[videoId], videoId, step, withVideo\n ) for videoId in self.resultsByVideo\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results total')\n self.PrintProgress()\n aggSize = len(self.resultsContainers)\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/total/{}.dump'.format('total')\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggTotal = sum(self.resultsContainers.values())\n aggTotal.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/total/{}'.format('total'),\n vmax=None\n )\n aggTotal.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/total/{}_vision'.format('total')\n )\n aggTotal.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/total/{}.txt'.format('total')\n )\n aggTotal.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS+'/total/{}.txt'.format('orthoDist')\n )\n for segmentSize in [1, 2, 3]:\n aggTotal.StoreAngularVelocityPerSegment(\n segmentSize=segmentSize,\n filePath=PATH_TO_STATISTIC_RESULTS+'/total/' +\n '{}_angVelPerSegment_{}s.txt'.format('total',\n segmentSize),\n useRealTimestamp=False\n )\n Store(ac, dumpPath)\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n # def worker(videoId, processedResult):\n # processedResult.WriteVideo(\n # 
'results/statistics/videos/{}.mkv'.format(videoId),\n # fps=5,\n # segmentSize=1/5,\n # width=480,\n # height=480\n # )\n # return None\n # async_result = [\n # pool.apipe(\n # worker,\n # videoId, aggrVideoResults[videoId]\n # ) for videoId in aggrVideoResults\n # ]\n # for r in async_result:\n # r.get()\n # self.progressBar['value'] += 1\n # pool.close()\n # pool.join()\n # del self.workingThread\n\n listProcessedResult = list()\n for resultId in self.resultsContainers:\n rc = self.resultsContainers[resultId]\n videoId = resultId.split('_')[-1]\n userId = resultId[:-len(videoId)-1]\n listProcessedResult.append((userId, videoId,\n rc.GetProcessedResult(step)))\n ProcessedResult.StoreAngVelStats(listProcessedResult,\n PATH_TO_STATISTIC_RESULTS +\n '/total/stats')\n self.done = True\n self.progressBar = None\n self.workingThread = None\n if self.doneCallback is not None:\n self.doneCallback()",
"def _work(self, **kwargs):\n \n results = self.work(**kwargs)\n self._complete(results)\n return results",
"def execute(self, worker):\r\n res = map(worker, self)\r\n\r\n return res",
"def _process_task(self):\n try:\n logger.info('Processing task %s', self._current_task.operation)\n m = getattr(self.transform_plugin, self._current_task.operation)\n m(**(self._current_task.parameters or {}))\n with self._task_lock.writer_lock:\n self._current_task.status = 'complete'\n self._current_task.completed = datetime.now()\n logger.info('Processing of task is complete')\n except Exception as ex: #pylint: disable=broad-except\n logger.exception(\"Error occurred running task\")\n self._current_task.status = 'failed'\n self._current_task.message = str(ex)\n raise",
"def submit(self, seq):\n return self.pool.apply_async(self.calc, (seq,))",
"def run(self):\r\n\r\n self.logger.debug(\"Controller started.\")\r\n\r\n response = {}\r\n while True:\r\n task = self.tq.get()\r\n if task == \"stop\":\r\n break\r\n try:\r\n response = self.call_method(task.sender, task.body)\r\n\r\n except ResourceException as err:\r\n self.logger.error(\"%s\" % err)\r\n\r\n if response.get('status'):\r\n del response['status']\r\n response['error'] = '%s' % err\r\n\r\n except Exception:\r\n self.logger.debug('{0}'.format(traceback.format_exc()))\r\n\r\n finally:\r\n self.pq.put(AmqpTask(response, headers=task.headers))",
"def add(self, task_id, node_id, result):",
"def process_task(self, task):\n raise NotImplementedError()",
"def go(self):\n self.get_details()\n self.run()\n if hasattr(self, 'result'):\n self.submit_results()",
"def _result(self, res):\n worker_info = self.workers[res.source]\n worker_info.pending_results -= 1\n self.pending_results -= 1\n assert worker_info.pending_results >= 0\n assert self.pending_results >= 0\n if worker_info.pending_results == 0:\n self.available.append(worker_info)\n self._log_available()\n self.results.put(res)\n\n # Try to process as many queued requests as possible.\n assert len(self.requests) == 0 or self.requests[0].target == ANY\n while len(self.requests) > 0:\n req = self.requests.popleft()\n if req.target == ANY:\n if len(self.available) > 0:\n self._request(req, force_processing=True)\n else:\n self.requests.appendleft(req)\n break\n else:\n self._request(req, force_processing=True)\n assert len(self.requests) == 0 or self.requests[0].target == ANY",
"def put_results(self, *args, **kwargs):\n with self.result_queue_pool.item() as queue:\n return queue.put(\n yaml.dump(add_dicts(*args, completed_at=time.time(),\n worker_id=self.worker_id, **kwargs)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract the named tensor fields. Performs decompression and delta computation, and inserts the results into the TensorDB.
|
def _process_named_tensor(self, named_tensor, collaborator_name):
raw_bytes = named_tensor.data_bytes
metadata = [{'int_to_float': proto.int_to_float,
'int_list': proto.int_list,
'bool_list': proto.bool_list}
for proto in named_tensor.transformer_metadata]
# The tensor has already been transfered to aggregator,
# so the newly constructed tensor should have the aggregator origin
tensor_key = TensorKey(
named_tensor.name,
self.uuid,
named_tensor.round_number,
named_tensor.report,
tuple(named_tensor.tags)
)
tensor_name, origin, round_number, report, tags = tensor_key
assert ('compressed' in tags or 'lossy_compressed' in tags), (
f'Named tensor {tensor_key} is not compressed'
)
if 'compressed' in tags:
dec_tk, decompressed_nparray = self.tensor_codec.decompress(
tensor_key,
data=raw_bytes,
transformer_metadata=metadata,
require_lossless=True
)
dec_name, dec_origin, dec_round_num, dec_report, dec_tags = dec_tk
# Need to add the collaborator tag to the resulting tensor
if type(dec_tags) == str:
new_tags = tuple([dec_tags] + [collaborator_name])
else:
new_tags = tuple(list(dec_tags) + [collaborator_name])
# layer.agg.n.trained.delta.col_i
decompressed_tensor_key = TensorKey(
dec_name, dec_origin, dec_round_num, dec_report, new_tags
)
if 'lossy_compressed' in tags:
dec_tk, decompressed_nparray = self.tensor_codec.decompress(
tensor_key,
data=raw_bytes,
transformer_metadata=metadata,
require_lossless=False
)
dec_name, dec_origin, dec_round_num, dec_report, dec_tags = dec_tk
if type(dec_tags) == str:
new_tags = tuple([dec_tags] + [collaborator_name])
else:
new_tags = tuple(list(dec_tags) + [collaborator_name])
# layer.agg.n.trained.delta.lossy_decompressed.col_i
decompressed_tensor_key = TensorKey(
dec_name, dec_origin, dec_round_num, dec_report, new_tags
)
if 'delta' in tags:
base_model_tensor_key = TensorKey(
tensor_name, origin, round_number, report, ('model',)
)
base_model_nparray = self.tensor_db.get_tensor_from_cache(
base_model_tensor_key
)
if base_model_nparray is None:
raise ValueError(f'Base model {base_model_tensor_key} not present in TensorDB')
final_tensor_key, final_nparray = self.tensor_codec.apply_delta(
decompressed_tensor_key,
decompressed_nparray, base_model_nparray
)
else:
final_tensor_key = decompressed_tensor_key
final_nparray = decompressed_nparray
assert (final_nparray is not None), f'Could not create tensorkey {final_tensor_key}'
self.tensor_db.cache_tensor({final_tensor_key: final_nparray})
self.logger.debug(f'Created TensorKey: {final_tensor_key}')
return final_tensor_key, final_nparray
|
[
"def _extract_tensor_metadata(result: torch.Tensor) -> TensorMetadata:\n shape = result.shape\n dtype = result.dtype\n requires_grad = result.requires_grad\n stride = result.stride()\n\n memory_formats = {\n torch.contiguous_format,\n torch.channels_last,\n torch.channels_last_3d,\n }\n\n memory_format = None\n\n for query_format in memory_formats:\n if result.is_contiguous(memory_format=query_format):\n memory_format = query_format\n break\n\n is_quantized = result.is_quantized\n qscheme = None\n q_scale = None\n q_zero_point = None\n\n if is_quantized:\n qscheme = result.qscheme()\n\n if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:\n q_scale = result.q_scale()\n q_zero_point = result.q_zero_point()\n\n return TensorMetadata(\n shape, dtype, requires_grad, stride, memory_format, is_quantized, qscheme, q_scale, q_zero_point)",
"def __extract_fn(self, tfrecord):\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'size': tf.io.FixedLenFeature([2], tf.int64)\n }\n # Extract the data record\n sample = tf.io.parse_single_example(tfrecord, feature_description)\n image = tf.io.decode_image(sample['image'], channels=3)\n image = tf.reshape(image, [sample['size'][0], sample['size'][1], 3]) # TODO this line should be useless ?\n label = sample['label']\n return (image, label)",
"def decode_batch(batch, inputs):\n inputs['filename'] = batch[0]\n inputs['image'] = batch[1]\n inputs['image_size'] = batch[2]\n inputs['center2d'] = batch[3]\n inputs['center3d'] = batch[4]\n inputs['box_dim2d'] = batch[5]\n inputs['box_dim3d'] = batch[6]\n inputs['rotation'] = batch[7]\n inputs['rt'] = tf.reshape(batch[9], [-1, 3, 4])\n inputs['k'] = tf.reshape(batch[10], [-1, 3, 3])",
"def get_tensor_info(self, uid: str):\n value, order = self.tensors[uid]\n return (value.shape, order)",
"def _load_initial_tensors_from_dict(self, tensor_dict):\n tensor_key_dict = {\n TensorKey(k, self.uuid, self.round_number, False, ('model',)):\n v for k, v in tensor_dict.items()\n }\n # all initial model tensors are loaded here\n self.tensor_db.cache_tensor(tensor_key_dict)\n self.logger.debug(f'This is the initial tensor_db: {self.tensor_db}')",
"def _record_summary_tensor_data():\n summary_list = list()\n for data in debug_ops.SUMMARY_TENSOR_CACHE:\n check_summary_param(data[0], data[1], data[2])\n if data[0] == \"TensorSummary\":\n summary_op_name = data[1] + \"[:Tensor]\"\n elif data[0] == \"ScalarSummary\":\n summary_op_name = data[1] + \"[:Scalar]\"\n elif data[0] == \"ImageSummary\":\n summary_op_name = data[1] + \"[:Image]\"\n elif data[0] == \"HistogramSummary\":\n summary_op_name = data[1] + \"[:Histogram]\"\n summary_value = {\n \"name\": summary_op_name,\n \"data\": data[2]\n }\n summary_list.append(summary_value)\n _cache_summary_tensor_data(summary_list)\n debug_ops.SUMMARY_TENSOR_CACHE = []",
"def prepare_tensor_dict(g, data, name):\n return {\n key: prepare_tensor(g, val, '{}[\"{}\"]'.format(name, key))\n for key, val in data.items()\n }",
"def _decode_record(record, name_to_features):\n # example = tf.parse_single_example(record, name_to_features)\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example",
"def load_fieldinfo(self, preprocess = True, columns = None, isindict = None, show_progress=False):\n if show_progress:\n show_progress='Loading Field Info'\n if preprocess and os.path.exists(os.path.join(self.path2database, 'fieldinfo')):\n return pd.read_hdf(os.path.join(self.path2database, 'fieldinfo', 'fieldnames.hdf'))\n else:\n return self.parse_fields()",
"def dump_tensor(context, tensor, reshape=None, source_info=None):\r\n np_buf = context.get_value(tensor)\r\n raw_indice = tensor.indice.indice\r\n buf = Print.get_tensor(context, np_buf, raw_indice, reshape)\r\n if source_info:\r\n print(source_info)\r\n\r\n print(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\r\n print(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' +\r\n str(buf.dtype))",
"def fetch(self, name):\n if not isinstance(name, string_types):\n raise TypeError('Tensor name must be a string.')\n if not self.has(name):\n raise RuntimeError(\"Can't find tensor: {}\".format(name))\n\n return self._data[name]",
"def _decode_record(record, name_to_features):\n\texample = tf.parse_example(record, name_to_features)\n\treturn example",
"def __call__(self, *indices):\n return Tensor(self.name, list(indices), is_field=True,\n num_of_der=0, dimension=self.dimension,\n statistics=self.statistics)",
"def __init_tensor_register(self):\n self.tensors = dict()",
"def __get_tensors(self, tensors_idx_list):\n return_list = list()\n for tensor_idx in tensors_idx_list:\n if tensor_idx < 0:\n return_list.append(TensorWrapper(tensor_idx, 0, 0))\n continue\n\n tensor = self.subgraph.Tensors(tensor_idx)\n buffer_idx = tensor.Buffer()\n buffer = self.model.Buffers(buffer_idx)\n\n # Check if the tensors are quantized. Parse if yes.\n qnn_params = None\n tflite_qnn_params = tensor.Quantization()\n if tflite_qnn_params is not None:\n # TFLite supports both per-tensor and per-axis (aka channel) quantization. For\n # per-tensor quantization, scale and zero points are scalar values. For per-axis\n # quantization, scale and zero points for the weights are tensors (activations are\n # per-tensor quantized). However, the TFLite quantization spec puts restrictions on\n # zero points for per-axis quantization. Specifically, the zero point is a tensor\n # but all values are 0. More information can be found here -\n # https://www.tensorflow.org/lite/performance/quantization_spec\n\n tflite_scale = tflite_qnn_params.ScaleAsNumpy()\n tflite_zero_point = tflite_qnn_params.ZeroPointAsNumpy()\n is_qnn_params_valid = True\n\n # Handle Per-axis and per-tensor cases\n if isinstance(tflite_scale, np.ndarray):\n assert isinstance(tflite_zero_point, np.ndarray)\n\n # Tensor - Per-axis quantization\n if tflite_scale.size != 1 and tflite_zero_point.size != 1:\n scale = tflite_scale\n # Ensure that all zero points are zeros\n zero_point = tflite_zero_point\n if not np.all(zero_point == 0):\n pass\n zero_point = int(zero_point[0])\n\n # Scalar - Per-tensor quantization\n elif tflite_scale.size == 1 and tflite_zero_point.size == 1:\n scale = float(tflite_scale[0])\n zero_point = int(tflite_zero_point[0])\n\n else:\n raise NotImplementedError(\n \"Quantized type {} (scale) and {} (zero point) not supported\".format(\n type(tflite_scale), type(tflite_zero_point)\n )\n )\n elif tflite_scale == 0 and tflite_zero_point == 0:\n # Handle corner case for ops like quantized reshape whose second operand (shape)\n # has zero scale and zero zero point. This is not used.\n is_qnn_params_valid = False\n else:\n raise NotImplementedError(\n \"Quantized type {} not supported\".format(\n type(tflite_scale))\n )\n\n # Check that the scale and zero points are valid.\n if is_qnn_params_valid:\n qnn_params = dict()\n qnn_params[\"scale\"] = scale\n qnn_params[\"zero_point\"] = zero_point\n return_list.append(TensorWrapper(\n tensor_idx, tensor, buffer, qnn_params))\n return return_list",
"def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors,\n all_tensor_names=False):\n\n npyvar = {}\n try:\n reader = pywrap_tensorflow.NewCheckpointReader(file_name)\n if all_tensors or all_tensor_names:\n var_to_shape_map = reader.get_variable_to_shape_map()\n for key in sorted(var_to_shape_map):\n if key in varkeys:\n print(\"tensor_name: \", key)\n if 'conv' in key:\n keysplit = key.split('/')\n if keysplit[0] not in npyvar.keys():\n npyvar[keysplit[0]] = {}\n if keysplit[1] == 'kernels':\n npyvar[keysplit[0]]['weights'] = np.array(reader.get_tensor(key)).transpose(3,2,0,1)\n if keysplit[1] == 'biases':\n npyvar[keysplit[0]]['biases'] = np.array(reader.get_tensor(key))\n else:\n keysplit = key.split('/')\n if (keysplit[0]+'/'+keysplit[1]) not in npyvar.keys():\n npyvar[keysplit[0]+'/'+keysplit[1]] = {}\n if keysplit[2] == 'kernels':\n npyvar[keysplit[0]+'/'+keysplit[1]]['weights'] = np.array(reader.get_tensor(key)).transpose(3,2,0,1)\n if keysplit[2] == 'biases':\n npyvar[keysplit[0]+'/'+keysplit[1]]['biases'] = np.array(reader.get_tensor(key))\n #else:\n # if all_tensors:\n # print(np.array(reader.get_tensor(key)).transpose(3,2,0,1)[0,:,:,:])\n elif not tensor_name:\n print(reader.debug_string().decode(\"utf-8\"))\n else:\n print(\"tensor_name: \", tensor_name)\n print(reader.get_tensor(tensor_name))\n except Exception as e: # pylint: disable=broad-except\n print(str(e))\n if \"corrupted compressed block contents\" in str(e):\n print(\"It's likely that your checkpoint file has been compressed \"\n \"with SNAPPY.\")\n if (\"Data loss\" in str(e) and\n (any([e in file_name for e in [\".index\", \".meta\", \".data\"]]))):\n proposed_file = \".\".join(file_name.split(\".\")[0:-1])\n v2_file_error_template = \"\"\"\n It's likely that this is a V2 checkpoint and you need to provide the filename\n *prefix*. Try removing the '.' and extension. Try:\n inspect checkpoint --file_name = {}\"\"\"\n print(v2_file_error_template.format(proposed_file))\n np.save('/home/siiva/RUDY/SIIVA/GoalCam/annotations/shanghaioffice/ALL/models/train/squeezedet_half.npy', npyvar)",
"def _parse_record(example_proto):\n\n example = tf.parse_single_example(example_proto, feature)\n im = tf.decode_raw(example['image'], tf.float32)\n im = tf.reshape(im, (img_rows, img_cols, 1))\n\n label = tf.decode_raw(example['label'], tf.int32)\n label = tf.reshape(label, (4, 1))\n\n return (im, label)",
"def batch_namedtuples(namedtuples):\n if not namedtuples:\n return namedtuples\n\n nt_type = type(namedtuples[0])\n dicts = [nt._asdict() for nt in namedtuples]\n batched_dict = batch_feature_dicts(dicts)\n return nt_type(**batched_dict)",
"def VtuTensorFieldNames(vtu):\n \n resultFieldNames = []\n for fieldName in vtu.GetFieldNames():\n if VtuFieldRank(vtu, fieldName) == 2:\n resultFieldNames.append(fieldName)\n \n return resultFieldNames"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if the round is complete. If so, perform the end-of-round operations: model aggregation, metric reporting, delta generation (+ associated tensorkey labeling), and saving the model.
|
def _end_of_round_check(self):
if not self._is_round_done():
return
# Compute all validation related metrics
all_tasks = self.assigner.get_all_tasks_for_round(self.round_number)
for task_name in all_tasks:
self._compute_validation_related_task_metrics(task_name)
# Once all of the task results have been processed
# Increment the round number
self.round_number += 1
# Save the latest model
self.logger.info(f'Saving round {self.round_number} model...')
self._save_model(self.round_number, self.last_state_path)
# TODO This needs to be fixed!
if self._time_to_quit():
self.logger.info('Experiment Completed. Cleaning up...')
else:
self.logger.info(f'Starting round {self.round_number}...')
# Cleaning tensor db
self.tensor_db.clean_up(self.db_store_rounds)
|
[
"def save_model(self):\n torch.save(\n {\n 'epoch': self.epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.opt.state_dict(),\n 'acc': self.val_stats[\"acc\"],\n }, os.path.join(self.params.model_dir,\"snapshot.ep{}.pth\".format(self.epoch)))\n if self.val_stats[\"best_acc\"] <= self.val_stats[\"acc\"]:\n self.val_stats[\"best_acc\"] = self.val_stats[\"acc\"]\n self.val_stats[\"best_epoch\"] = self.epoch\n print(\"Saving model after epoch {}\".format(self.epoch))\n torch.save(\n {\n 'epoch': self.epoch,\n 'model_state_dict': self.model.state_dict(),\n 'acc': self.val_stats[\"acc\"],\n }, os.path.join(self.params.model_dir,\"model.acc.best\"))\n #else:\n # checkpoint = torch.load(os.path.join(self.params.model_dir,\"model.acc.best\"))\n # self.model.load_state_dict(checkpoint[\"model_state_dict\"])",
"def end_training(self):\n save_model = True\n if self.scheduler.num_bad_epochs >= self.scheduler.patience:\n self.num_bad_epochs += 1\n save_model = False\n if self.num_bad_epochs >= self.early_stopping_criteria:\n print (\"\\nEnding training early!\")\n return True\n else:\n if save_model:\n self.save(self.model_filepath)\n return False",
"def on_average_best_models_validation_end(self, context: PhaseContext) -> None:\n pass",
"def _end_training_statistics(self):\n\n # !Don't update execution state metrics here - they will be updated in\n # !on_epoch_end\n\n # update tuner overall objective metric\n for metric in self.instance_state.agg_metrics.to_list():\n improved = self.tuner_state.agg_metrics.update(\n metric.name, metric.get_best_value())\n\n if metric.name == self.tuner_state.objective and improved:\n self.instance_state.is_best_model = True\n\n # record which one is the best model\n # ! dont try to simplify - must be after all statistics are computed\n if self.instance_state.is_best_model or not self.tuner_state.best_instance_config: # nopep8\n config = self.instance_state.to_config()\n self.tuner_state.best_instance_config = config\n\n # record execution config in instance\n self.instance_state.execution_states_collection.add(\n self.execution_state.idx, self.execution_state) # nopep8",
"def save_models(self, finally_epoch: int):\n for key, v in self.models_dict.items():\n save_path = os.path.join(self.summary.write_dir,\n f'{key}-{finally_epoch}.h5')\n if isinstance(v, k.Model):\n k.models.save_model(v, save_path)\n print(INFO, f'Save {key} as {save_path}')",
"def on_step_end(self, episode_step, logs):\n self.step += 1\n self.loss.append(logs.get(\"metrics\")[0])\n\n if not self.step % self.interval:\n y_pred = make_predictions(self.model.target_model, self.X_val)\n stats = calculate_metrics(self.y_val, y_pred)\n\n if np.isnan(self.loss).all(): # If all entries are NaN, this happens during training\n stats[\"loss\"] = 0\n else:\n stats[\"loss\"] = np.nanmean(self.loss)\n self.loss = [] # Reset loss every `self.interval`\n\n for k, v in stats.items():\n summary = Summary(value=[Summary.Value(tag=k, simple_value=v)])\n self.writer.add_summary(summary, global_step=self.step)\n\n if stats.get(\"FN\") <= self.FN_bound and stats.get(\"FP\") <= self.FP_bound and self.step >= self.save_after:\n print(f\"Model saved! FN: {stats.get('FN')}; FP: {stats.get('FP')}\")\n self.model.target_model.save(f\"./models/{datetime.now().strftime('%Y%m%d')}_FN{stats.get('FN')}_FP{stats.get('FP')}.h5\")",
"def fit_n_save_roc(unfitted_model, dbname, model_folder=\"models\",\n roc_folder=\"tmp\", n_test_pairs=PARAMS[\"n_test_pairs\"]):\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,\n format='%(levelname)s - %(asctime)s - %(message)s',\n datefmt='%d/%m %I:%M:%S')\n # logging.basicConfig(level=logging.DEBUG,\n # filename='experiments.log',\n # format='%(asctime)s - %(message)s')\n print(\"## Loading data... - \" + datetime.now().ctime())\n X_train, y_train, X_test, y_test = load_preprocess_data(dbname=dbname)\n logging.debug(\"Dimensionality of X_train: %s x %s\",\n X_train.shape[0], X_train.shape[1])\n logging.debug(\"Dimensionality of X_test: %s x %s\",\n X_test.shape[0], X_test.shape[1])\n\n print(\"### Fitting the model... - \" + datetime.now().ctime())\n model = unfitted_model\n model.fit(X_train, y_train)\n\n print(\"### Preparing paths... - \" + datetime.now().ctime())\n # Making output folders\n if not os.path.exists(\"{}/\".format(model_folder)):\n os.makedirs(\"{}/\".format(model_folder))\n\n if not os.path.exists(\"{}/\".format(roc_folder)):\n os.makedirs(\"{}/\".format(roc_folder))\n\n # Finding right path names\n cur_i = -1\n model_filename, score_filename, z_filename = [\".\"]*3\n while cur_i < 20:\n if not(os.path.exists(model_filename)\n or os.path.exists(score_filename)\n or os.path.exists(z_filename)):\n print(\"### Saving as model {}... - \".format(cur_i)\n + datetime.now().ctime())\n break\n cur_i += 1\n model_filename = \"{}/model_{}_{}_{}\".format(model_folder, dbname,\n model.name, cur_i)\n score_filename = \"{}/scores_{}_{}_{}.npy\".format(roc_folder, dbname,\n model.name, cur_i)\n z_filename = \"{}/z_{}_{}_{}.npy\".format(roc_folder, dbname,\n model.name, cur_i)\n\n print(\"### Saving the model... - \" + datetime.now().ctime())\n # Will work only for certain types of models.\n save_model(model_filename, model.ul_model)\n\n print(\"### Saving the ROC curve... - \" + datetime.now().ctime())\n X_test_pairs, z_test_pairs = get_random_pairs(X_test, y_test, n_test_pairs)\n s_test = model.score(X_test_pairs)\n s_test.dump(score_filename)\n z_test_pairs.dump(z_filename)",
"def test_save_and_load(self):\n\n with test_util.TempDirectory() as f:\n self.model.save(f)\n self.model = tc.load_model(f)\n loaded_model = tc.load_model(f)\n\n self.test__list_fields()\n print(\"Saved model list fields passed\")\n\n self.test_get()\n print(\"Saved model get passed\")\n\n self.test_summaries()\n print(\"Saved model summaries passed\")",
"def save_best_model(self, _ = None, __ = None):\n max_val_blue = max(self.metrics_collector.val_blue_scores) \n last_val_blue = self.metrics_collector.val_blue_scores[-1]\n if last_val_blue == max_val_blue:\n torch.save(self.model, self.fpath_model)\n print (f\"best model so far saved\")\n else:\n print()",
"def main(self):\r\n\r\n fullStart = timer()\r\n self.train()\r\n self.discriminatorModel.save(fr\"C:\\Coding\\Models\\ganModels\\HighRes\\DISC\\chunk{self.trainingChunk+1}of{self.chunks}\"+\r\n fr\"landscapeDISC{self.fullXRes}x{self.fullYRes}res-{self.EPOCHS}epochs-{self.latentSize}latent.model\")\r\n self.generatorModel.save(fr\"C:\\Coding\\Models\\ganModels\\HighRes\\GEN\\chunk{self.trainingChunk+1}of{self.chunks}\"+\r\n fr\"landscapeGEN{self.fullXRes}x{self.fullYRes}res-{self.EPOCHS}epochs-{self.latentSize}latent.model\")\r\n fullStop = timer()\r\n print(f\"Training {self.EPOCHS} epochs finished in: {str(datetime.timedelta(seconds=int(fullStop-fullStart)))}\")",
"def reach_termination_criteria(self, curr_round):\n \n if curr_round >= self.rounds:\n logger.info('Reached maximum global rounds. Finish training :) ')\n return True\n\n return self.terminate_with_metrics(curr_round)",
"def mt_save(self, epoch, loss):\n if self.opt.SAVE_BEST_MODEL and loss < self.best_loss:\n log(\"Your best model is renewed\")\n if len(self.threads) > 0:\n self.threads[-1].join()\n self.threads.append(MyThread(self.opt, self, epoch, self.best_loss, loss))\n self.threads[-1].start()\n if self.opt.SAVE_BEST_MODEL and loss < self.best_loss:\n log(\"Your best model is renewed\")\n self.best_loss = loss",
"async def process_epoch(self) -> bool:\n\n # Any calculations done within the current epoch would be included here.\n # Also sending of any result messages (other than Status message) would be included here.\n return True # only if the component is done for the current epoch\n # return False # if the component still has things to do within the current epoch",
"def _save_model(self, out_file):\n pass",
"def on_batch_end(self, trainer: Trainer, _):\n epoch = trainer.current_epoch\n global_step = trainer.global_step\n if global_step % self.save_step_frequency == 0:\n self.keep_newest_checkpoint()\n file_path = f\"{self.checkpoints_dir}/every={self.save_step_frequency}_epoch={epoch}_step={global_step}.ckpt\"\n trainer.save_checkpoint(file_path)",
"def _try_finish_step(self):\n LOG.log(\n INFO_FINE,\n \"Can finish step? (TS=%s,CAD=%s,NASPD=%d/%d)\",\n bool(self.timestep),\n self.flag_create_agent_done,\n self.num_agent_step_profile_done,\n WORLD_SIZE,\n )\n if self.timestep is None:\n return\n if not self.create_agent_done:\n return\n if self.num_agent_step_profile_done < WORLD_SIZE:\n return\n\n self.simulator_proxy.coordinator_done()\n self._write_summary()\n self._prepare_for_next_step()",
"def finish(self):\n logger = logging.getLogger(\"optimize\")\n self.tabular.close()\n self.ran = True\n opt_pars = \"\\n\".join(\n \" {0}={1:12.6E}\".format(name, self.xopt[i])\n for (i, name) in enumerate(self.names)\n )\n opt_time = self.timing[\"end\"] - self.timing[\"start\"]\n summary = \"\"\"\nSummary of optimization results\n------- -- ------------ -------\n{0}: calculations completed ({1:.4f}s.)\nIterations: {2}\nOptimized parameters\n{3}\n\"\"\".format(\n self.job, opt_time, IOPT, opt_pars\n )\n logger.info(summary)\n\n # write out optimized params\n with open(os.path.join(self.rootd, \"params.opt\"), \"w\") as fobj:\n for (i, name) in enumerate(self.names):\n fobj.write(\"{0} = {1: .18f}\\n\".format(name, self.xopt[i]))\n environ.parent_process = 0\n\n # Link directory 'final' to the last evaluation directory\n os.symlink(\n os.path.relpath(LASTEVALD, start=self.rootd),\n os.path.join(self.rootd, \"final\"),\n )\n\n if environ.notebook:\n print(\"\\nDone\")",
"def run(self):\n\t\tassert self.model, \"Must set model before running\"\n\t\tassert self.featureMatrix.any(), \"Must extract features before running\"\n\n\t\tfor numHoldout in range(self.maxHoldout, self.maxHoldout + 1):\n\t\t\tprint \"\\n\\tRunning Holdout: \", str(numHoldout)\n\n\t\t\tnumRounds = float(len(self.featureMatrix) / numHoldout)\n\t\t\t# assert numRounds > 0, \"Holding out too many; 0 examples for training\"\n\n\t\t\t# print \"Examples to Train: \", str(numExamples)\n\t\t\terrors, sumOfScores, rankedExamples = Counter(), 0, []\n\n\t\t\tfor i in range(int(numRounds)):\n\t\t\t\tprint \"Round\", str(i + 1)\n\t\t\t\tholdout = i * numHoldout\n\t\t\t\tfinalHoldout = holdout + numHoldout\n\t\t\t\t# print '...removing example(s): ', range(holdout, finalHoldout)\n\t\t\t\tholdouts = self.featureMatrix[holdout:finalHoldout]\n\t\t\t\tholdoutLabels = self.labels[holdout:finalHoldout]\n\t\t\t\ttrainExamples = vstack([self.featureMatrix[:holdout], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.featureMatrix[finalHoldout:]])\n\t\t\t\ttrainLabels = self.labels[:holdout] + self.labels[finalHoldout:]\t\t\t\t\t\t\t\n\n\t\t\t\t# print '...scoring: ' \n\t\t\t\tself.model.fit(trainExamples, trainLabels)\n\t\t\t\tcurrScore = self.model.score(holdouts, holdoutLabels)\n\t\t\t\tsumOfScores += currScore\n\t\t\t\t\n\n\t\t\t\t# print '...calculating details'\n\t\t\t\tpredicted_labels = self.model.predict(holdouts)\n\n\t\t\t\tfor j,pred in enumerate(predicted_labels):\n\t\t\t\t\tif not pred == holdoutLabels[j]:\n\t\t\t\t\t\terrors[holdoutLabels[j]] += 1\n\n\n\t\t\t\tif numHoldout == 1: \n\t\t\t\t\trankedExamples.append((self.model.predict_proba(holdouts)[0], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.fileNames[i], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpredicted_labels == self.labels[i]))\t\t\n\t\t\tresults = ''\n\t\t\tfor i, c in enumerate(self.classNames):\n\t\t\t\tmissRate = str(float(errors[i]) / self.classCounts[i])\n\t\t\t\tresults += '\\t' + c + ' error: ' + missRate + '\\n'\n\n\t\t\tresults += '\\tAverage Score: ' + str(sumOfScores / numRounds)\n\t\t\tfileName = 'results/scores/LRholdout_' + str(numHoldout)\n\t\t\tfor param in self.model_params:\n\t\t\t\tfileName += '_' + str(param)\n\t\t\tfileName += '.txt'\n\t\t\twith open(fileName, 'w') as f:\n\t\t\t\tf.write(results)\n\t\t\tprint results\n\n\n\t\t\tprint '..ranking examples'\n\t\t\tif len(rankedExamples):\n\t\t\t\t#most democrat will appear first in file\n\t\t\t\texamples = sorted(rankedExamples, key=lambda e: e[0][1]) \n\t\t\t\tfileName = 'results/rankedExamples/LRholdout_' + str(numHoldout)\n\t\t\t\tfor param in self.model_params:\n\t\t\t\t\tfileName += '_' + str(param)\n\t\t\t\tfileName += '.txt'\n\t\t\t\twith open(fileName,'w') as f:\n\t\t\t\t\tfor e in examples:\n\t\t\t\t\t\tresults = e[1]\n\t\t\t\t\t\tresults += '\\n\\t Probability of class '\n\t\t\t\t\t\tresults += self.classNames[0] + ': '\n\t\t\t\t\t\tresults += str(e[0][0])\n\t\t\t\t\t\tresults += '\\n\\t Correct: ' + str(e[2]) + '\\n'\n\t\t\t\t\t\tf.write(results)",
"def epoch_finished(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the round is done.
|
def _is_round_done(self):
tasks_for_round = self.assigner.get_all_tasks_for_round(
self.round_number
)
return all([self._is_task_done(t) for t in tasks_for_round])
|
[
"def _end_of_round_check(self):\n if not self._is_round_done():\n return\n\n # Compute all validation related metrics\n all_tasks = self.assigner.get_all_tasks_for_round(self.round_number)\n for task_name in all_tasks:\n self._compute_validation_related_task_metrics(task_name)\n\n # Once all of the task results have been processed\n # Increment the round number\n self.round_number += 1\n\n # Save the latest model\n self.logger.info(f'Saving round {self.round_number} model...')\n self._save_model(self.round_number, self.last_state_path)\n\n # TODO This needs to be fixed!\n if self._time_to_quit():\n self.logger.info('Experiment Completed. Cleaning up...')\n else:\n self.logger.info(f'Starting round {self.round_number}...')\n\n # Cleaning tensor db\n self.tensor_db.clean_up(self.db_store_rounds)",
"def test_next_round_complete(self):\n lesson = Lesson(self.student)\n lesson.level = 3\n lesson.round_num = len(levels[3].rounds)\n lesson.next_round()\n # assert lesson.level == 4\n assert lesson.complete",
"def is_done(self) -> bool:\n n_completed = 0\n final_depth = self._get_depth_of(self.fidelities[-1])\n for trial in self.lineages.get_trials_at_depth(final_depth):\n n_completed += int(trial.status == \"completed\")\n\n return n_completed >= self.population_size",
"def check_finished(self):\n if self.max_iterations == -1:\n return False\n return self.iterations >= self.max_iterations",
"def _check_round_has_still_trumps(self, rnd: PlayerRound) -> bool:\n if score.get_missing_cards(rnd.tricks)[(rnd.trump*9):(rnd.trump*9)+9].sum() != 0:\n return True\n return False",
"def is_done(state):\n winner = calc_winner(state)\n return is_draw(state) or winner != 0, winner",
"def roundEnd(self):\n self.end_of_round.play()\n\n if self.cur_round == 1:\n for p in self.players:\n p.roundEnd()\n self.startRound(2)\n else:\n self.gameEnd()",
"def passed(self):\n if self.wobble.radius_mm * 2 < self.tolerance.value:\n return True\n else:\n return False",
"def done(self):\n return hasattr(self, \"_duration\")",
"def has_finished(grid):\n\n if not get_cell_count(grid) and grid.generation > 0:\n return True\n\n return False",
"def get_step_done(self, car_stats) -> bool:\n done = False\n\n if car_stats['time'] > 1200:\n return True\n\n for item in self._settings_done['true_flags_to_done']:\n if car_stats[item]:\n done = True\n break\n\n return done",
"def is_done(self, operation):\n raise Exception(\"PollThread.is_done(operation) not implemented\")",
"def _try_finish_step(self):\n LOG.log(\n INFO_FINE,\n \"Can finish step? (TS=%s,CAD=%s,NASPD=%d/%d)\",\n bool(self.timestep),\n self.flag_create_agent_done,\n self.num_agent_step_profile_done,\n WORLD_SIZE,\n )\n if self.timestep is None:\n return\n if not self.create_agent_done:\n return\n if self.num_agent_step_profile_done < WORLD_SIZE:\n return\n\n self.simulator_proxy.coordinator_done()\n self._write_summary()\n self._prepare_for_next_step()",
"def check_fin(self):\n for ball in self.current_state.balls:\n if ball.position[1] + ball.radius > self.end_line:\n return True\n return False",
"def is_finished(self):\n return len(self.legalMoves) == 0",
"def testResultDone(self):\n ray.init(num_cpus=1, num_gpus=1)\n runner = TrialRunner(BasicVariantGenerator())\n kwargs = {\n \"stopping_criterion\": {\n \"training_iteration\": 2\n },\n \"resources\": Resources(cpu=1, gpu=1),\n }\n runner.add_trial(Trial(\"__fake\", **kwargs))\n trials = runner.get_trials()\n\n runner.step()\n self.assertEqual(trials[0].status, Trial.RUNNING)\n runner.step()\n self.assertNotEqual(trials[0].last_result[DONE], True)\n runner.step()\n self.assertEqual(trials[0].last_result[DONE], True)",
"def round_end(self, hooker):\r\n pass",
"def isHacktoberfestCompleted(countOfPR):\n\n if (countOfPR < 4):\n print(\"You have incomplete PR's, let me do it for you\")\n while(countOfPR < 4):\n countOfPR = makePR(countOfPR)\n time.sleep(2)\n print(\"\\nYou have successfully completed 4 PR's :)\")\n return True\n return False",
"def is_game_finished(self):\n return len(self._possible_moves) == 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The vocabulary should have 121 items.
|
def test_vocabulary_size(self):
self.assertEqual(len(frompcset), 121)
|
[
"def vocab_size(self) -> int:",
"def vocabulary_length(self) -> int:\n pass",
"def generate_vocabulary(self):\n print(' >>> Generating vocabulary...', end='', flush=True)\n self.vocabulary = {}\n for letter in self.text:\n if letter in self.vocabulary.keys():\n self.vocabulary[letter] += 1\n else:\n self.vocabulary[letter] = 1\n print(ANSI.ok_green, 'OK !', ANSI.endc)",
"def model_vocabulary(self) -> List[str]:",
"def test_javascript_vocabulary():\n vocabulary = javascript.vocabulary\n LENGTH = 101 # includes <UNK>, <s>, </s>\n assert len(vocabulary) == LENGTH\n assert vocabulary.to_text(0) == vocabulary.unk_token\n assert vocabulary.to_text(1) == vocabulary.start_token\n assert vocabulary.to_text(2) == vocabulary.end_token",
"def limitVocab(self, max_size):\n if self.VOCAB_SIZE <= max_size:\n print(f'Current vocab size is {self.VOCAB_SIZE}, no need to decrease size')\n return\n# self.word2index = {}\n# # self.word2count = {}\n# self.index2word = {}\n self.VOCAB_SIZE = max_size\n \n# self.SOS = '<s>'\n# self.EOS = '</s>'\n# self.UNK = '<unk>'\n# self.iSOS = 0\n# self.iEOS = 1\n# self.iUNK = 2\n \n c = Counter(self.word2count)\n m = c.most_common(1)[0][1]\n c[self.PAD] = m + 4\n c[self.SOS] = m + 3\n c[self.EOS] = m + 2\n c[self.UNK] = m + 1\n \n list_of_wc = c.most_common(max_size)\n self.index2word = {i:w for i, (w, _) in enumerate(list_of_wc)}\n self.word2index = {w:i for i, (w, _) in enumerate(list_of_wc)}",
"def get_vocab_length(self):\n return len(self.vocabulary)",
"def __build_vocabulary(self, objects):\n\n vocabulary_index = 0\n\n for indexable in objects:\n\n for word in indexable.words_generator(self.stop_words):\n\n word = eng_stemmer.stem(word)\n\n if word not in self.vocabulary:\n\n self.vocabulary[word] = vocabulary_index\n\n vocabulary_index += 1",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size):\n\n print(\"In create_vocabulary\")\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\"processing line %d\" % counter)\n text_conversation =line.strip().split(\"\\t\")\n \n txt = text_conversation[0].strip() + \" \" + text_conversation[1].strip() + \" \" + text_conversation[2].strip()\n\n tokens = txt.split()\n for w in tokens:\n word = w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n\n\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n print(\"vocab_length={0}\".format(len(vocab_list)))\n\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def learn_vocab(self, docs): \n self.vectorizer.fit(docs)",
"def _get_text_vocabulary(cls):\n # this is an abstract method, concrete implentations are provided in the subclasses\n pass",
"def __init__(self, vocab):\n self.vocab = vocab",
"def create_vocabulary(training_directory, cutoff):\n # holds a list of vocabulary among both training sets\n vocabulary = []\n word_count_bow = []\n\n # Traverse training_directory / 2016 (not including test)\n\n training_labels = os.listdir(training_directory)\n\n # Traverse each training label\n for label in training_labels:\n # get the training set from that label\n training_set = os.listdir(training_directory + \"/\" + label)\n\n # for survey response in the training set\n for response in training_set:\n # get the path\n response_file_path = training_directory + \"/\" + label + \"/\" + response\n # open the response\n response_file = open(response_file_path, encoding='utf-8')\n # for each line in the response\n for line in response_file:\n if \"\\n\" in line:\n # remove the trailing '\\n'\n line = line[:-1]\n duplicate_word = False\n # compare it with each line of vocabulary\n for word_index in range(len(vocabulary)):\n if vocabulary[word_index] == line:\n # a previously found word type has appeared again; increment count\n word_count_bow[word_index] += 1\n duplicate_word = True\n break\n if not duplicate_word:\n # add the new word type to the vocabulary and set its count to 1\n vocabulary.append(line)\n word_count_bow.append(1)\n\n # Parse the list according to the cutoff\n vocabulary_length = len(vocabulary)\n word_index = 0\n while word_index < vocabulary_length:\n if word_count_bow[word_index] < cutoff:\n del word_count_bow[word_index]\n del vocabulary[word_index]\n # vocabulary is one element shorter\n vocabulary_length -= 1\n # since vocabulary shortened, word_index + 0 moves to the next element of the list\n else:\n # cut the \"\\n\" off of each string, if applicable\n if \"\\n\" in vocabulary[word_index]:\n vocabulary[word_index] = vocabulary[word_index][:-1]\n # move to the next element of the list\n word_index += 1\n\n # sort the final vocabulary\n vocabulary.sort()\n\n return vocabulary",
"def extend_vocab(self, path):\n for words, _, _ in Tokenizer().generate_samples(path):\n for word in words:\n if word not in self.vocab_input:\n self.vocab_input[word] = self.ru_model.get_word_vector(word)",
"def get_vocabulary():\n r = requests.get(BASE_URL + '12586/vocab', auth=(AUTH[0], AUTH[2]))\n if r.status_code == requests.codes.ok:\n return r.json()\n else:\n return None",
"def _create_vocab():\n # Create vocabulary dictionary\n vocab_dict = {}\n\n # Blank token\n idx = 0\n vocab_dict['-'] = idx\n\n # 0-9\n for i in range(ord('9') - ord('0') + 1):\n idx += 1\n vocab_dict[chr(ord('0') + i)] = idx\n\n # a-z\n for i in range(ord('z') - ord('a') + 1):\n idx += 1\n vocab_dict[chr(ord('a') + i)] = idx\n\n # Create vocabulary object\n vocab = Vocabulary(vocab_dict)\n\n return vocab",
"def add_vocab_from_Recipe1M(self):\n if self.recipe1M:\n for word in tqdm(self.vocab_model.index2word):\n self.word2idx[word] = self.idx\n self.idx2word[self.idx] = word\n self.idx += 1",
"def build_vocab(self):\n # Create a dictionary that maps words to their count\n self.word_count = self.word2count()\n\n # Trim the vocabulary\n # Get rid of out-of-vocabulary words from the dataset\n if self.min_word_count or self.max_vocab_size:\n self.trimVocab()\n self.trimDatasetVocab()\n\n # Trim sequences in terms of length\n if self.max_seq_len:\n if self.x_lengths:\n self.trimSeqLen()\n\n else:\n # Calculate sequences lengths\n self.x_lengths = [len(seq.split()) for seq in self.dataset[:, 0]]\n \n if self.target_col:\n self.y_lengths = [len(seq.split()) for seq in self.dataset[:, self.target_col]]\n \n self.trimSeqLen() \n\n \n # Map each tokens to index\n if not self.word2idx_mapping:\n self.mapWord2index()\n \n # Crate index2word mapping\n self.index2word = {index: word for word, index in self.word2index.items()}\n \n # Map dataset tokens to indices\n self.mapWords2indices()\n \n # Create weights matrix based on Glove vectors\n if self.use_pretrained_vectors:\n self.glove_vectors()",
"def build_vocab(self, words, vocab_size):\n count = [(\"UNK\", 0)]\n most_frequent_words = Counter(words).most_common(vocab_size - 1)\n count.extend(most_frequent_words)\n word2index = {}\n index = 0\n\n if self.write_vocab:\n path = os.path.dirname(__file__)\n path = os.path.join(path, 'vocab_1000.tsv')\n f = open(path, \"w\")\n\n for word, _ in count:\n word2index[word] = index\n\n if index < 1000 and self.write_vocab:\n f.write(word + \"\\n\")\n\n index += 1\n\n if self.write_vocab:\n f.close()\n\n index2word = dict(zip(word2index.values(), word2index.keys()))\n return count, word2index, index2word"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The pcsets in the vocabulary and PCSETS should be the same.
|
def test_pcsets_in_vocabulary(self):
pcsets = set(frompcset.keys())
self.assertEqual(pcsets, set(PCSETS))
|
[
"def test_vocabulary_size(self):\n self.assertEqual(len(frompcset), 121)",
"def available_subsets(self):\n return ['train', 'valid']",
"def test_subsets(self):\n t = self.t \n self.assertEqual(t.subsets(), frozenset(\n [frozenset('HG'), frozenset('RM')]))",
"def test_load_all_psets(self):\n resp = self.client.get('/networking/Fall2012/problemsets/', HTTP_USER_AGENT=self.userAgent)\n self.assertEqual(resp.status_code, 200)",
"def numSets(self):\n return self.sets",
"def create_vocabulary_list(self, data_set):\r\n vocabularies_set = set([])\r\n for data in data_set:\r\n vocabularies_set = vocabularies_set | set(data)\r\n return np.array(list(vocabularies_set))",
"def get_vocab(sentences: List[str]) -> Set[str]:\n vocab = set()\n for sentence in sentences:\n words = sentence.split()\n for word in words:\n vocab.add(word)\n return vocab",
"def clearSets(self):\r\n self.matchSet = []\r\n self.correctSet = []",
"def _get_consectuive_word_sets(spacy_text):\n N = len(spacy_text)\n sets = []\n for L in range(1, N+1):\n for i in range(N+1-L):\n sets.append(set(spacy_text[i:i+L]))\n\n return sets",
"def max_sets (self):\n \n raise NotImplementedError",
"def create_sets():\n train_labeled = []\n test_labeled = []\n train_lines, test_lines = read_files()\n word = []\n for line in train_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n train_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n word = []\n for line in test_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n test_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n\n return train_labeled, test_labeled",
"def test_add_sets_are_kept_equal():\n atom = ATOMClassifier(X_bin, y_bin, index=True, random_state=1)\n train_idx, test_idx = atom.train.index, atom.test.index\n atom.add(Pruner())\n assert all(idx in train_idx for idx in atom.train.index)\n pd.testing.assert_index_equal(test_idx, atom.test.index)",
"def removeSets() :\n\tcleanSet = ['Blocking_Set', 'Proxy_Set', 'Render_Set', 'Anim_Set']\n\tsetGrps = mc.ls(type = 'objectSet')\n\n\tfor eachSet in setGrps : \n\t\tif eachSet in cleanSet : \n\t\t\tmc.delete(eachSet)",
"def vocabs(self) -> Tuple[ControlledVocab]:\n return tuple(self.__vocabs)",
"def __MPCdiffCallSet(self):\n subSet = [ i for i in np.arange(len(self.mostProbableC)) if i != self.mostProbableC[i] ]\n return subSet",
"def test_closest_pcset(self):\n for pcset, pcsGT in pcsetsGT.items():\n pcs = closestPcSet(pcset)\n with self.subTest(pcset=pcset, closest_match=pcsGT):\n self.assertEqual(pcs, pcsGT)",
"def assign_sets(dbsets):\n limits = {'setkeys':[]}\n for setnum in dbsets.keys():\n setnumint = int(setnum) - 1\n limits.update({setnumint:dbsets[setnum]})\n limits['setkeys'].append(setnumint)\n return limits",
"def generate_vocabulary(self):\n print(' >>> Generating vocabulary...', end='', flush=True)\n self.vocabulary = {}\n for letter in self.text:\n if letter in self.vocabulary.keys():\n self.vocabulary[letter] += 1\n else:\n self.vocabulary[letter] = 1\n print(ANSI.ok_green, 'OK !', ANSI.endc)",
"def getVocabAndMasterListFromPicke(filename=\"../data/masterList.p\"):\n masterList = pickle.load(open(filename, \"rb\"))\n masterSentences = [] # by now ignore the relation between sentences\n vocab = set()\n print len(masterList)\n maxMatrixRow = 0\n for prefixAndList in masterList:\n print(\"Prefix is {0}\".format(prefixAndList[0]))\n for featurePrefixAndSublit in prefixAndList[1]:\n print(\"====Feature Prefix: \" + featurePrefixAndSublit[0])\n for sentenceAndFeatures in featurePrefixAndSublit[1]:\n # print len(sentenceAndFeatures)\n print (\"***\")\n # print len(sentenceAndFeatures[0])\n print len(sentenceAndFeatures[1])\n print (\"***\")\n masterSentences.append(sentenceAndFeatures[1]) # ignore sentences relation\n for wordAndFeatures in sentenceAndFeatures[1]:\n vocab.add(wordAndFeatures[0])\n print wordAndFeatures[0], wordAndFeatures[1].shape, type(wordAndFeatures[1])\n # update max row possible\n if maxMatrixRow < wordAndFeatures[1].shape[0]:\n maxMatrixRow = wordAndFeatures[1].shape[0]\n print(\"Feature matrices has the largest row of {0} (for pre-padding purpose)\".format(maxMatrixRow))\n return vocab, masterList, masterSentences"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify that you get sensible pcset matches.
|
def test_closest_pcset(self):
for pcset, pcsGT in pcsetsGT.items():
pcs = closestPcSet(pcset)
with self.subTest(pcset=pcset, closest_match=pcsGT):
self.assertEqual(pcs, pcsGT)
|
[
"def test_pcsets_in_vocabulary(self):\n pcsets = set(frompcset.keys())\n self.assertEqual(pcsets, set(PCSETS))",
"def is_set(cards):\n \n res=False\n verifylist=[False]*4 # Creates a list of Flase bool to verify the attributs\n count=0\n\n if len(cards)==3: \n for i,j,k in zip(cards[0],cards[1],cards[2]): # Compares each attribut in parallel to save \n if (i==j and j==k) or (i!=j and j!=k and i!=k):\n verifylist[count]=True # If the current attribut is all different or all identical, changes the verify list\n count+=1\n\n if all(verifylist): # If the verify list is all True , the chosen cards form a set\n res=True\n \n return res",
"def test_is_cover_1(set_of_sets, alphabet):\n assert is_cover(set_of_sets, alphabet)",
"def test_load_all_psets(self):\n resp = self.client.get('/networking/Fall2012/problemsets/', HTTP_USER_AGENT=self.userAgent)\n self.assertEqual(resp.status_code, 200)",
"def test_known_good_pairs(self):\n for start, goal, iternum in self.should_pass:\n self.assertTrue(mu_test(start, goal, iternum))",
"def isValid(self):\n #global newpc\n res = False\n pattern = re.compile(r'(\\b[A-Z]{1,2}[0-9][A-Z0-9]? [0-9][ABD-HJLNP-UW-Z]{2}\\b)')\n if ' ' in self.pc:\n joinpc = self.pc.replace(\" \", \"\").upper()\n try:\n if len(joinpc) == 7:\n fstchs = joinpc[:4]\n lstchs = joinpc[4:]\n if len(joinpc) == 6:\n fstchs = joinpc[:3]\n lstchs = joinpc[3:]\n if len(joinpc) == 5:\n fstchs = joinpc[:2]\n lstchs = joinpc[2:]\n \n self.newpc = fstchs+\" \"+lstchs\n match = pattern.search(self.newpc)\n \n except (AttributeError, UnboundLocalError):\n res = False\n else:\n joinpc = self.pc.upper()\n try: \n if len(joinpc) == 7:\n fstchs = joinpc[:4]\n lstchs = joinpc[4:]\n if len(joinpc) == 6:\n fstchs = joinpc[:3]\n lstchs = joinpc[3:]\n if len(joinpc) == 5:\n fstchs = joinpc[:2]\n lstchs = joinpc[2:]\n \n self.newpc = fstchs+\" \"+lstchs\n match = pattern.search(self.newpc)\n except (AttributeError, UnboundLocalError):\n res = False\n \n try:\n matchpc = match.group(1)\n \n if matchpc:\n res = True\n else:\n return res\n except (AttributeError, UnboundLocalError):\n res = False\n\n return res",
"def test_has_ec(self):\n test1 = \"hypothetical protein\"\n test2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n test3 = \"Sigma-X negative effector (EC 3 and more)\"\n test4 = \"Hypothetical protein TEPIRE1_21570 (predicted: PeP phosphonomutase (predicted EC 2.7.8.23) (predicted EC 4.1.3.30))\"\n test5 = \"Glutaminase (EC 3.5.-.-)\"\n test6 = \"Histidinol-phosphatase (EC 3.1.3.-)\"\n test7 = \" (predicted EC 2.3.1.1)\"\n test8 = \"hypothetical protein (predicted: MULTISPECIES: GNAT family N-acetyltransferase [Geobacillus] (predicted EC 2.3.1.1))\"\n test9 = \"Aminodeoxychorismate lyase, EC 4.1.3.38\"\n test10 = \"Aminodeoxychorismate lyase, EC: 4.1.3.38\"\n test11 = \"Aminodeoxychorismate lyase, EC: -.-.-.-\"\n test12 = \"Histidinol-phosphatase (EC -.1.3.1)\"\n test13 = \"DNA polymerase IV (Web Scaped EC 2.7.7.7)\"\n \n self.assertFalse(Annot_Reader.has_ec(test1))\n self.assertTrue(Annot_Reader.has_ec(test2))\n self.assertFalse(Annot_Reader.has_ec(test3))\n self.assertTrue(Annot_Reader.has_ec(test4))\n self.assertTrue(Annot_Reader.has_ec(test5))\n self.assertTrue(Annot_Reader.has_ec(test6))\n self.assertTrue(Annot_Reader.has_ec(test7))\n self.assertTrue(Annot_Reader.has_ec(test8))\n self.assertTrue(Annot_Reader.has_ec(test9))\n self.assertTrue(Annot_Reader.has_ec(test10))\n self.assertFalse(Annot_Reader.has_ec(test11))\n self.assertFalse(Annot_Reader.has_ec(test12))\n self.assertTrue(Annot_Reader.has_ec(test13))\n # Run on test genome annotation\n orig = currentdir + '\\\\test_files\\\\' \"test_genome_annotation.xlsx\"\n cpy = currentdir + '\\\\test_files\\\\' \"test_genome_annotation_cpy.xlsx\"\n self.assertTrue(os.path.isfile(orig))\n if os.path.isfile(cpy):\n os.remove(cpy)\n self.assertFalse(os.path.isfile(cpy))\n # Now copy the file\n shutil.copyfile(orig, cpy)\n self.assertTrue(os.path.isfile(cpy))\n email = None\n min_pct_idnt = 97.0\n min_qry_cvr = 95.0\n max_blast_hits = 10\n max_uniprot_hits = 50\n args = {\n '--src' : orig,\n '--dest' : cpy,\n '--sheet': 0,\n '--visible' : False,\n '--keywords' : None,\n '--load_job' : None,\n '--email' : email, \n '--min_pct_idnt' : min_pct_idnt,\n '--min_qry_cvr' : min_qry_cvr,\n '--max_blast_hits' : max_blast_hits,\n '--max_uniprot_hits' : max_uniprot_hits,\n }\n reader = Annot_Reader(args)\n reader.autosave_filename = 'test_autosave.txt'\n self.assertTrue(reader.has_ec(reader.read(0, 'function')))\n self.assertTrue(reader.has_ec(reader.read(1, 'function')))\n self.assertFalse(reader.has_ec(reader.read(2, 'function')))\n self.assertTrue(reader.has_ec(reader.read(3, 'function')))\n self.assertTrue(reader.has_ec(reader.read(4, 'function')))\n self.assertTrue(reader.has_ec(reader.read(5, 'function')))\n self.assertTrue(reader.has_ec(reader.read(6, 'function')))\n self.assertTrue(reader.has_ec(reader.read(7, 'function')))\n self.assertTrue(reader.has_ec(reader.read(8, 'function')))\n self.assertTrue(reader.has_ec(reader.read(9, 'function')))\n self.assertFalse(reader.has_ec(reader.read(10, 'function')))\n self.assertFalse(reader.has_ec(reader.read(11, 'function')))\n self.assertFalse(reader.has_ec(reader.read(12, 'function')))\n self.assertFalse(reader.has_ec(reader.read(13, 'function')))\n self.assertFalse(reader.has_ec(reader.read(14, 'function')))\n self.assertTrue(reader.has_ec(reader.read(15, 'function')))\n self.assertTrue(reader.has_ec(reader.read(16, 'function')))\n self.assertTrue(reader.has_ec(reader.read(17, 'function')))\n self.assertTrue(reader.has_ec(reader.read(18, 'function')))",
"def test_is_cover_2(set_of_sets, alphabet):\n assert not is_cover(set_of_sets, alphabet)",
"def test_pos_match(self):\n measure = qpp_measure.QPPMeasure(\n measure_definition=MeasureDefinition({\n 'eligibility_options': [self.eligibility_option_with_place_of_service],\n 'performance_options': []\n })\n )\n\n test_claims = [claim.Claim({\n 'claim_lines': [\n {\n 'clm_line_hcpcs_cd': 'good_code',\n 'clm_pos_code': 23\n }\n ]\n })]\n output = measure.filter_by_eligibility_criteria(test_claims)\n assert output == test_claims",
"def test_match_primers(self):\n # The amplicon to match the primer to\n amp = Amplicon(\"AAAGCGGTTTGT\")\n # The Forward Primers\n fps = [\n Forward_Primer(\"TTT\", 0),\n Forward_Primer(\"GCC\", 1),\n Forward_Primer(\"GGG\", 2),\n Forward_Primer(\"AAA\", 3),\n Forward_Primer(\"ATA\", 4),\n ]\n # The Reverse Primers\n rps = [\n Reverse_Primer(\"GGG\", 0),\n Reverse_Primer(\"ACC\", 1),\n Reverse_Primer(\"CCC\", 2),\n Reverse_Primer(\"ACA\", 3),\n Reverse_Primer(\"GGG\", 4),\n ]\n # Run the Test for the forward primers\n amp.match_primers(fps, rps)\n self.assertTrue(amp.fP == fps[3])\n self.assertFalse(amp.fP == fps[1])\n self.assertFalse(amp.fP == fps[2])\n self.assertFalse(amp.fP == fps[0])\n self.assertFalse(amp.fP == fps[4])\n # Run the test for the reverse primers\n self.assertTrue(amp.rP == rps[3])\n self.assertFalse(amp.rP == rps[0])\n self.assertFalse(amp.rP == rps[1])\n self.assertFalse(amp.rP == rps[2])\n self.assertFalse(amp.rP == rps[4])",
"def check_test_set(frame, test_frame):\n #get the source files in the test set (and remove file extension)\n test_set = [i.split('.wav')[0] for i in test_frame['source_file']]\n\n #initialize list\n in_test_set = []\n\n #check each vocalization from downsampled set\n #TODO: deal with MZ better than this\n if set([i.split('_')[0] for i in frame['source_file']]) == {'MZ'}:\n for spec_num in range(len(frame\t)):\n spec_name = frame['source_file'].iloc[spec_num]\n short_name = spec_name.split('_na_na_na_na_na_na_na_na')[0]+spec_name.split('_na_na_na_na_na_na_na_na')[1].split('_clip')[0]+'_clip'\n if short_name in test_set: \n in_test_set.append(spec_name) \n else:\n for spec_num in range(len(frame)):\n spec_name = frame['source_file'].iloc[spec_num]\n if spec_name.split('_clip')[0]+'_clip' in test_set: \n in_test_set.append(spec_name) \n\n return in_test_set",
"def verify_csdata(self) -> None:",
"def test_is_antichain_1(set_of_sets):\n assert is_antichain(set_of_sets)",
"def test_subsets(self):\n t = self.t \n self.assertEqual(t.subsets(), frozenset(\n [frozenset('HG'), frozenset('RM')]))",
"def test_p_atch_cobtxid(self):\n pass",
"def isaset(cards):\n\n if len(cards) != 3:\n debugp ('A set must be three cards. I was \\\n passed %d.' % (len(cards)))\n return False\n uniquecolors = len(set([c.color for c in cards]))\n uniqueshapes = len(set([c.shape for c in cards]))\n uniqueshading = len(set([c.shading for c in cards]))\n uniquenumbers = len(set([c.number for c in cards]))\n if 2 in [uniquecolors, uniqueshapes, uniqueshading, uniquenumbers]:\n return False\n else:\n return True",
"def test_check_mutual_SNPs(self):\r\n\r\n #Test case 1: no mutual mismatches\r\n Allele_test = SelectHybridReads.CheckAlleleCombination(self.read_consensus, ['allele_A1', 'allele_A2'], self.allele_data)\r\n Allele_test.indicator_string = '------XXX-----XYYYY---'\r\n self.assertEqual(Allele_test.check_mutual_SNPs(), True)\r\n self.assertEqual(Allele_test.number_of_artefacts, 0)\r\n\r\n #Test case 2: one mutual mismatch (is accepted)\r\n Allele_test = SelectHybridReads.CheckAlleleCombination(self.read_consensus, ['allele_A1', 'allele_A2'], self.allele_data)\r\n Allele_test.indicator_string = '------XXM-----XYYYY---'\r\n self.assertEqual(Allele_test.check_mutual_SNPs(), True)\r\n self.assertEqual(Allele_test.number_of_artefacts, 1)\r\n\r\n #Test case 3: two mutual mismatches (is accepted)\r\n Allele_test = SelectHybridReads.CheckAlleleCombination(self.read_consensus, ['allele_A1', 'allele_A2'], self.allele_data)\r\n Allele_test.indicator_string = '------XXM-----XYMY---'\r\n self.assertEqual(Allele_test.check_mutual_SNPs(), True)\r\n self.assertEqual(Allele_test.number_of_artefacts, 2)\r\n\r\n #Test case 4: three mutual mismatches (is not accepted)\r\n Allele_test = SelectHybridReads.CheckAlleleCombination(self.read_consensus, ['allele_A1', 'allele_A2'], self.allele_data)\r\n Allele_test.indicator_string = '------XXM--M--XYMY---'\r\n self.assertEqual(Allele_test.check_mutual_SNPs(), False)\r\n self.assertEqual(Allele_test.number_of_artefacts, 3)",
"def test_is_antichain_2(set_of_sets):\n assert not is_antichain(set_of_sets)",
"def test_car_producer_success(self):\r\n for producer in CARS_PRODUCER:\r\n self.assertEqual(hw.Car.producer_checking(producer), producer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract traces from emcee.EnsembleSampler and apply the inverse transformation of parameters
|
def get_traces(sampler, nthin):
# load every nthin'th sample from the walkers and reshape to
# final dimensions
traces = sampler.chain[:, ::nthin, :].reshape(-1, sampler.dim).copy()
    # convert from sample space to meaningful space
traces[:, [1, 4, 5]] = np.exp(traces[:, [1, 4, 5]])
return traces
|
[
"def extr():\n x = ExtractInterpretationToPoints('WV_C18_L12')\n yield x",
"def process_epidemic_parameters(self):",
"def update_traces(self):\n # Decay traces:\n # Input to hidden:\n self.xy_reg_traces *= 0. # Regular traces decay in one time step\n self.xy_mem_traces *= self.mem_decays # Memory input traces do not decay in vanilla AuGMEnT\n\n # Update traces:\n # Input to hidden\n self.xy_reg_traces += (np.hstack((np.ones(self.bias_input), self.x_reg))).reshape(self.nx_inst + self.bias_input, 1)\n self.xy_mem_traces += (self.x_trans).reshape(self.nx_trans, 1)",
"def dotheglm(sensitivities,\n eventdir,\n annot_dir):\n # normalize the sensitivities\n from sklearn.preprocessing import normalize\n import copy\n #default for normalization is the L2 norm\n sensitivities_to_normalize = copy.deepcopy(sensitivities)\n for i in range(len(sensitivities)):\n sensitivities_to_normalize[i].samples = normalize(sensitivities_to_normalize[i].samples, axis = 1)\n\n sensitivities_stacked = mv.vstack(sensitivities_to_normalize)\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.targets)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.targets)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # get a list of the event files with occurances of faces\n event_files = sorted(glob(eventdir + '/*'))\n assert len(event_files) == 8\n # get additional events from the location annotation\n location_annotation = pd.read_csv(annot_dir, sep='\\t')\n\n # get all settings with more than one occurrence\n setting = [set for set in location_annotation.setting.unique()\n if (location_annotation.setting[location_annotation.setting == set].value_counts()[0] > 1)]\n\n # get onsets and durations\n onset = []\n duration = []\n condition = []\n for set in setting:\n for i in range(location_annotation.setting[location_annotation['setting'] == set].value_counts()[0]):\n onset.append(location_annotation[location_annotation['setting'] == set]['onset'].values[i])\n duration.append(location_annotation[location_annotation['setting'] == set]['duration'].values[i])\n condition.append([set] * (i + 1))\n # flatten conditions\n condition = [y for x in condition for y in x]\n assert len(condition) == len(onset) == len(duration)\n\n # concatenate the strings\n condition_str = [set.replace(' ', '_') for set in condition]\n condition_str = ['location_' + set for set in condition_str]\n\n # put it in a dataframe\n locations = pd.DataFrame({\n 'onset': onset,\n 'duration': duration,\n 'condition': condition_str\n })\n\n # sort according to onsets to be paranoid\n locations_sorted = locations.sort_values(by='onset')\n\n # this is a dataframe encoding flow of time\n time_forward = pd.DataFrame([{\n 'condition': 'time+',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0}\n for i in range(len(location_annotation) - 1)\n if location_annotation['flow_of_time'][i] in ['+', '++']])\n\n time_back = pd.DataFrame([{\n 'condition': 'time-',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0} for i in range(len(location_annotation) - 1)\n if location_annotation['flow_of_time'][i] in ['-', '--']])\n\n # sort according to onsets to be paranoid\n time_forward_sorted = time_forward.sort_values(by='onset')\n time_back_sorted = time_back.sort_values(by='onset')\n\n scene_change = pd.DataFrame([{\n 'condition': 'scene-change',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0}\n for i in range(len(location_annotation) - 1)])\n\n scene_change_sorted = scene_change.sort_values(by='onset')\n\n # this is a dataframe encoding exterior\n exterior = pd.DataFrame([{\n 'condition': 'exterior',\n 'onset': location_annotation['onset'][i],\n 'duration': location_annotation['duration'][i]}\n for i in range(len(location_annotation) - 1)\n if (location_annotation['int_or_ext'][i] == 'ext')])\n\n # sort according 
to onsets to be paranoid\n exterior_sorted = exterior.sort_values(by='onset')\n\n # this is a dataframe encoding nighttime\n night = pd.DataFrame([{'condition': 'night',\n 'onset': location_annotation['onset'][i],\n 'duration': location_annotation['duration'][i]}\n for i in range(len(location_annotation) - 1)\n if (location_annotation['time_of_day'][i] == 'night')])\n\n # sort according to onsets to be paranoid\n night_sorted = night.sort_values(by='onset')\n\n assert np.all(locations_sorted.onset[1:].values >= locations_sorted.onset[:-1].values)\n assert np.all(time_back_sorted.onset[1:].values >= time_back_sorted.onset[:-1].values)\n assert np.all(time_forward_sorted.onset[1:].values >= time_forward_sorted.onset[:-1].values)\n assert np.all(exterior_sorted.onset[1:].values >= exterior_sorted.onset[:-1].values)\n assert np.all(night_sorted.onset[1:].values >= night_sorted.onset[:-1].values)\n assert np.all(scene_change_sorted.onset[1:].values >= scene_change_sorted.onset[:-1].values)\n\n # check whether chunks are increasing as well as sanity check\n chunks = mean_sens_transposed.sa.chunks\n assert np.all(chunks[1:] >= chunks[:-1])\n\n # TR was not preserved/carried through in .a\n # so we will guestimate it based on the values of time_coords\n tc = mean_sens_transposed.sa.time_coords\n TRdirty = sorted(np.unique(tc[1:] - tc[:-1]))[-1]\n assert np.abs(np.round(TRdirty, decimals=2) - TRdirty) < 0.0001\n\n # make time coordinates real seconds\n mean_sens_transposed.sa.time_coords = np.arange(len(mean_sens_transposed)) * TRdirty\n\n # get runs, and runlengths in seconds\n runs = sorted(mean_sens_transposed.UC)\n assert runs == range(len(runs))\n runlengths = [np.max(tc[mean_sens_transposed.sa.chunks == run]) + TRdirty\n for run in runs]\n runonsets = [sum(runlengths[:run]) for run in runs]\n assert len(runs) == 8\n\n # initialize the list of dicts that gets later passed to the glm\n events_dicts = []\n # This is relevant to later stack all dataframes together\n # and paranoidly make sure that they have the same columns\n cols = ['onset', 'duration', 'condition']\n\n for run in runs:\n # get face data\n eventfile = sorted(event_files)[run]\n events = pd.read_csv(eventfile, sep='\\t')\n\n for index, row in events.iterrows():\n\n # disregard no faces, put everything else into event structure\n if row['condition'] != 'no_face':\n dic = {\n 'onset': row['onset'] + runonsets[run],\n 'duration': row['duration'],\n 'condition': row['condition']\n }\n events_dicts.append(dic)\n\n # concatenate all event dataframes\n run_reg = pd.DataFrame([{\n 'onset': runonsets[i],\n 'duration': abs(runonsets[i] - runonsets[i + 1]),\n 'condition': 'run-' + str(i + 1)}\n for i in range(7)])\n\n # get all of these wonderful dataframes into a list and squish them\n dfs = [locations_sorted[cols], scene_change_sorted[cols],\n time_back_sorted[cols], time_forward_sorted,\n exterior_sorted[cols], night_sorted[cols], run_reg[cols]]\n allevents = pd.concat(dfs)\n\n # save all non-face related events in an event file, just for the sake of it\n allevents.to_csv(results_dir + '/' + 'non_face_regs.tsv', sep='\\t', index=False)\n\n # append non-faceevents to event structure for glm\n for index, row in allevents.iterrows():\n dic = {\n 'onset': row['onset'],\n 'duration': row['duration'],\n 'condition': row['condition']\n }\n events_dicts.append(dic)\n\n # save this event dicts structure as a tsv file\n import csv\n with open(results_dir + '/' + 'full_event_file.tsv', 'w') as tsvfile:\n fieldnames = ['onset', 'duration', 
'condition']\n writer = csv.DictWriter(tsvfile, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n writer.writerows(events_dicts)\n # save this event file also as json file... can there ever be enough different files...\n import json\n with open(results_dir + '/' + 'allevents.json', 'w') as f:\n json.dump(events_dicts, f)\n\n # do the glm - we've earned it\n hrf_estimates = mv.fit_event_hrf_model(mean_sens_transposed,\n events_dicts,\n time_attr='time_coords',\n condition_attr='condition',\n design_kwargs=dict(drift_model='blank'),\n glmfit_kwargs=dict(model='ols'),\n return_model=True)\n\n mv.h5save(results_dir + '/' + 'sens_glm_avmovie_results.hdf5', hrf_estimates)\n print('calculated the, saving results.')\n\n return hrf_estimates",
"def getMetricsAtETS(self): #ETS=Every Time Step\n\n argsDict = cArgumentsDict.ArgumentsDict()\n argsDict[\"dt\"] = self.timeIntegration.dt\n argsDict[\"mesh_trial_ref\"] = self.u[0].femSpace.elementMaps.psi\n argsDict[\"mesh_grad_trial_ref\"] = self.u[0].femSpace.elementMaps.grad_psi\n argsDict[\"mesh_dof\"] = self.mesh.nodeArray\n argsDict[\"mesh_l2g\"] = self.mesh.elementNodesArray\n argsDict[\"dV_ref\"] = self.elementQuadratureWeights[('u',0)]\n argsDict[\"u_trial_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_trial_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"u_test_ref\"] = self.u[0].femSpace.psi\n argsDict[\"nElements_global\"] = self.mesh.nElements_global\n argsDict[\"nElements_owned\"] = self.mesh.nElements_owned\n argsDict[\"useMetrics\"] = int(self.coefficients.useMetrics)\n argsDict[\"q_vos\"] = self.coefficients.q_vos\n argsDict[\"u_l2g\"] = self.u[0].femSpace.dofMap.l2g\n argsDict[\"elementDiameter\"] = self.mesh.elementDiametersArray\n argsDict[\"nodeDiametersArray\"] = self.mesh.nodeDiametersArray\n argsDict[\"degree_polynomial\"] = float(self.degree_polynomial)\n argsDict[\"epsFactHeaviside\"] = self.coefficients.epsFactHeaviside\n argsDict[\"u_dof\"] = self.u[0].dof\n argsDict[\"u_dof_old\"] = self.u_dof_old\n argsDict[\"u0_dof\"] = self.u0_dof\n argsDict[\"velocity\"] = self.coefficients.q_v\n argsDict[\"offset_u\"] = self.offset[0]\n argsDict[\"stride_u\"] = self.stride[0]\n argsDict[\"numDOFs\"] = self.nFreeDOF_global[0]\n argsDict[\"R_vector\"] = self.R_vector\n argsDict[\"sR_vector\"] = self.sR_vector\n self.R_vector.fill(0.)\n self.sR_vector.fill(0.)\n (global_V,\n global_V0,\n global_sV,\n global_sV0,\n global_D_err) = self.clsvof.calculateMetricsAtETS(argsDict)\n\n from proteus.Comm import globalSum\n # metrics about conservation\n self.global_V = globalSum(global_V)\n self.global_V0 = globalSum(global_V0)\n self.global_sV = globalSum(global_sV)\n self.global_sV0 = globalSum(global_sV0)\n self.global_V_err = old_div(np.abs(self.global_V-self.global_V0),self.global_V0)\n self.global_sV_err = old_div(np.abs(self.global_sV-self.global_sV0),self.global_sV0)\n # metrics about distance property\n self.global_D_err = globalSum(global_D_err)\n # compute global_R and global_sR\n n=self.mesh.subdomainMesh.nNodes_owned\n self.global_R = np.sqrt(globalSum(np.dot(self.R_vector[0:n],self.R_vector[0:n])))\n self.global_sR = np.sqrt(globalSum(np.dot(self.sR_vector[0:n],self.sR_vector[0:n])))",
"def tracemodel(x):\n return model(x)",
"def forward(log_emlik, log_startprob, log_transmat):",
"def evaluations_vtrace(params, mu, T, gamma, V, trajs, rho, c):\n\tpi = jax.nn.softmax(params, -1)\n\tevals = Vtrace_evaluation(pi, mu, T, gamma, V, trajs, rho, c)\n\treturn evals",
"def run_model(Y,X,EM_DICT=None,verbose=0,modalpha=0.0005,removecells=1):\n\n enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)\n enet.fit(X,Y)\n if verbose==1:\n print(enet.score(X,Y))\n\n Be=pd.DataFrame(enet.coef_)\n Be.columns=X.columns\n Be.index=Y.columns\n\n #EM iterateit\n Yhat=pd.DataFrame(enet.predict(X))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n SSE_all=np.square(Y.subtract(Yhat))\n\n X_adjust=X.copy()\n X_adjust['unperturbed']=[0]*len(X)\n\n df_SSE = []\n df_logit = []\n df_pf = []\n\n if EM_DICT is not None:\n\n for curcov in EM_DICT.keys():\n\n curcells=EM_DICT[curcov]\n\n X_notcur=X.copy()\n X_notcur[curcov]=[0]*len(X_notcur)\n\n X_sub=X_notcur.loc[curcells]\n\n Y_sub=Y.loc[curcells]\n\n GENE_var=2.0*Y_sub.var(axis=0)\n vargenes=GENE_var[GENE_var>0].index\n\n\n Yhat_notcur=pd.DataFrame(enet.predict(X_sub))\n Yhat_notcur.index=Y_sub.index\n Yhat_notcur.columns=Y_sub.columns\n\n SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))\n SSE=SSE_all.loc[curcells].subtract(SSE_notcur)\n SSE_sum=SSE.sum(axis=1)\n\n SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)\n logitify=np.divide(1.0,1.0+np.exp(SSE_sum))#SSE_transform))#sum))\n\n df_SSE.append(SSE_sum)\n df_logit.append(logitify)\n pf=np.mean(logitify>0.99)\n\n if verbose==1:\n \n print(curcov,pf)\n df_pf.append([curcov,pf])\n weak_perturb=1.0*(logitify<0.1)\n X_adjust[curcov].loc[curcells]=logitify\n X_adjust['unperturbed'].loc[curcells]=weak_perturb\n\n print('done with EM')\n\n #refit model\n\n enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)\n\n if removecells==1:\n goodcells=X_adjust['unperturbed']!=1\n print(np.mean(goodcells))\n Y=Y[goodcells]\n X_adjust=X[goodcells]\n \n enet.fit(X_adjust,Y)\n Yhat=pd.DataFrame(enet.predict(X_adjust))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n\n if verbose==1:\n print(enet.score(X_adjust,Y))\n\n Be=pd.DataFrame(enet.coef_)\n Be.columns=X_adjust.columns\n Be.index=Y.columns\n RES_out=Y.subtract(Yhat) \n\n if EM_DICT is not None:\n return(Be,X_adjust,RES_out,df_pf)#,df_SSE,df_logit)\n\n return(Be,X_adjust,RES_out)#,df_SSE,df_logit)",
"def test_source_replace_listeners():\n orig_config = \"\"\"\n [nlp]\n lang = \"en\"\n pipeline = [\"transformer\",\"tagger\",\"senter\"]\n\n [components]\n\n [components.senter]\n factory = \"senter\"\n\n [components.senter.model]\n @architectures = \"spacy.Tagger.v1\"\n nO = null\n\n [components.senter.model.tok2vec]\n @architectures = \"spacy-transformers.TransformerListener.v1\"\n grad_factor = 1.0\n upstream = \"transformer\"\n\n [components.senter.model.tok2vec.pooling]\n @layers = \"reduce_mean.v1\"\n\n [components.tagger]\n factory = \"tagger\"\n\n [components.tagger.model]\n @architectures = \"spacy.Tagger.v1\"\n nO = null\n\n [components.tagger.model.tok2vec]\n @architectures = \"spacy-transformers.TransformerListener.v1\"\n grad_factor = 1.0\n upstream = \"transformer\"\n\n [components.tagger.model.tok2vec.pooling]\n @layers = \"reduce_mean.v1\"\n\n [components.transformer]\n factory = \"transformer\"\n\n [components.transformer.model]\n @architectures = \"spacy-transformers.TransformerModel.v3\"\n name = \"distilbert-base-uncased\"\n \"\"\"\n orig_config = Config().from_str(cfg_string)\n nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)\n assert nlp.pipe_names == [\"transformer\", \"tagger\", \"senter\"]\n tagger = nlp.get_pipe(\"tagger\")\n train_examples = []\n for t in TRAIN_DATA:\n train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))\n for tag in t[1][\"tags\"]:\n tagger.add_label(tag)\n optimizer = nlp.initialize(lambda: train_examples)\n assert nlp.get_pipe(\"transformer\").listening_components == [\"tagger\", \"senter\"]\n for i in range(2):\n losses = {}\n nlp.update(train_examples, sgd=optimizer, losses=losses)\n\n with make_tempdir() as dir_path:\n nlp.to_disk(dir_path)\n base_model = str(dir_path)\n new_config = {\n \"nlp\": {\n \"lang\": \"en\",\n \"pipeline\": [\"transformer\", \"tagger\", \"senter\", \"ner\"],\n },\n \"components\": {\n \"transformer\": {\"source\": base_model},\n \"tagger\": {\n \"source\": base_model,\n \"replace_listeners\": [\"model.tok2vec\"],\n },\n \"senter\": {\n \"source\": base_model,\n \"replace_listeners\": [\"model.tok2vec\"],\n },\n \"ner\": {\n \"factory\": \"ner\",\n \"model\": {\n \"@architectures\": \"spacy.TransitionBasedParser.v2\",\n \"state_type\": \"ner\",\n \"tok2vec\": {\n \"@architectures\": \"spacy-transformers.TransformerListener.v1\",\n \"grad_factor\": 1.0,\n \"upstream\": \"transformer\",\n \"pooling\": {\"@layers\": \"reduce_mean.v1\"},\n },\n },\n },\n },\n }\n new_nlp = util.load_model_from_config(new_config, auto_fill=True)\n for component in (\"tagger\", \"senter\"):\n assert (\n new_nlp.config[\"components\"][component][\"model\"][\"tok2vec\"][\n \"@architectures\"\n ]\n == \"spacy-transformers.Tok2VecTransformer.v3\"\n )\n assert new_nlp.get_pipe(\"transformer\").listening_components == [\"ner\"]\n\n with make_tempdir() as new_dir_path:\n new_nlp.to_disk(new_dir_path)\n new_nlp_re = spacy.load(new_dir_path)\n for component in (\"tagger\", \"senter\"):\n assert (\n new_nlp.config[\"components\"][component][\"model\"][\"tok2vec\"][\n \"@architectures\"\n ]\n == \"spacy-transformers.Tok2VecTransformer.v3\"\n )\n assert new_nlp_re.get_pipe(\"transformer\").listening_components == [\"ner\"]",
"def process_ensemble(self, sender, ens):\n self.emit_ens(ens)",
"def __init__(self, trainer, data, params):\n super(EncDecEvaluator, self).__init__(trainer, data, params)\n self.encoder = trainer.encoder\n self.decoder = trainer.decoder\n self.classifier = trainer.classifier",
"def preprocess(st_in,inv=None,event=None):\n st = st_in.copy()\n\n # trim to event origin time\n if event:\n origintime = event.origins[0].time\n st.trim(origintime,origintime+1000)\n\n # PREPROCESS and CONVERT to cm/s\n st.detrend('linear')\n st.detrend('demean')\n st.taper(max_percentage=0.05)\n if inv:\n st.attach_response(inventories=inv)\n st.remove_response(output='VEL',water_level=60)\n\n st.detrend('linear')\n st.detrend('demean')\n st.taper(max_percentage=0.05)\n\n for tr in st:\n tr.data *= 1E6\n\n # FILTER, global bounds\n freqmin,freqmax = 1/bounds[1],1/bounds[0]\n st.filter(\"bandpass\",freqmin=freqmin,freqmax=freqmax,corners=3)\n st.detrend('linear')\n st.detrend('demean')\n st.taper(max_percentage=0.05)\n\n return st",
"def transform(self, ctx, modules):",
"def _GenerateFromE1E2(config, base, value_type):\n req = { 'e1' : float, 'e2' : float }\n kwargs, safe = GetAllParams(config, base, req=req)\n #print(base['obj_num'],'Generate from E1E2: kwargs = ',kwargs)\n return Shear(**kwargs), safe",
"def _add_trace(self):\n new_trace = [\n self._rdp_generator.encoder((a, self._encode_reward(r), sp))\n for _, a, r, sp, _ in self.current_episode\n ]\n self.dataset.append(new_trace + [-1])",
"def generate_extra_statistics(self, sample):\n self.parameters.update(sample.copy())\n self.parameters, added_keys = self.conversion_function(self.parameters)\n self.hyper_prior.parameters.update(self.parameters)\n ln_ls = self._compute_per_event_ln_bayes_factors()\n for ii in range(self.n_posteriors):\n sample[f\"ln_bf_{ii}\"] = float(ln_ls[ii])\n sample[\"selection\"] = float(self.selection_function(self.parameters))\n if added_keys is not None:\n for key in added_keys:\n self.parameters.pop(key)\n return sample",
"def test_header_to_trace_set_params(self):\n trace_count = 100\n sample_count = 1000\n\n try:\n with trsfile.open(self.tmp_path, 'w', headers={\n Header.LABEL_X: \"s\",\n Header.LABEL_Y: \"V\",\n Header.OFFSET_X: 100,\n Header.SCALE_X: 1.1,\n Header.SCALE_Y: 0.9,\n Header.TRACE_OFFSET: 200,\n Header.LOGARITHMIC_SCALE: False,\n Header.ACQUISITION_RANGE_OF_SCOPE: 1.0,\n Header.ACQUISITION_COUPLING_OF_SCOPE: 2,\n Header.ACQUISITION_OFFSET_OF_SCOPE: 3.0,\n Header.ACQUISITION_INPUT_IMPEDANCE: 4.0,\n Header.ACQUISITION_DEVICE_ID: '5',\n Header.ACQUISITION_TYPE_FILTER: 6,\n Header.ACQUISITION_FREQUENCY_FILTER: 7.0,\n Header.ACQUISITION_RANGE_FILTER: 8.0,\n Header.EXTERNAL_CLOCK_USED: True,\n Header.EXTERNAL_CLOCK_THRESHOLD: 9.0,\n Header.EXTERNAL_CLOCK_MULTIPLIER: 10,\n Header.EXTERNAL_CLOCK_PHASE_SHIFT: 11,\n Header.EXTERNAL_CLOCK_RESAMPLER_MASK: 12,\n Header.EXTERNAL_CLOCK_RESAMPLER_ENABLED: False,\n Header.EXTERNAL_CLOCK_FREQUENCY: 13.0,\n Header.EXTERNAL_CLOCK_BASE: 14,\n Header.NUMBER_VIEW: 15,\n Header.TRACE_OVERLAP: True,\n Header.NUMBER_OF_ENABLED_CHANNELS: 16,\n Header.NUMBER_OF_USED_OSCILLOSCOPES: 17,\n Header.XY_SCAN_WIDTH: 18,\n Header.XY_SCAN_HEIGHT: 19,\n Header.XY_MEASUREMENTS_PER_SPOT: 20,\n }) as trs_traces:\n trs_traces.extend([\n Trace(\n SampleCoding.FLOAT,\n [0] * sample_count,\n TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})\n )\n for i in range(0, trace_count)]\n )\n expected_trace_set_parameters = TraceSetParameterMap()\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_X_LABEL, \"s\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_Y_LABEL, \"V\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_OFFSET, 100)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_SCALE, 1.1)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.Y_SCALE, 0.9)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.TRACE_OFFSET, 200)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_USE_LOG_SCALE, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_RANGE, 1.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUPLING, 2)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_OFFSET, 3.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_INPUT_IMPEDANCE, 4.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_DEVICE_IDENTIFIER, '5')\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_TYPE, 6)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_FREQUENCY, 7.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_RANGE, 8.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_ENABLED, True)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_THRESHOLD, 9.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_MULTIPLIER, 10)\n 
expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_PHASESHIFT, 11)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK, 12)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK_ENABLED, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_FREQUENCY, 13.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_TIMEBASE, 14)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_NUM_TRACES_SHOWN, 15)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_TRACES_OVERLAP, True)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_ACTIVE_CHANNEL_COUNT, 16)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUNT, 17)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_X, 18)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_Y, 19)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_MEASUREMENTS_PER_SPOT, 20)\n self.assertDictEqual(trs_traces.get_headers()[Header.TRACE_SET_PARAMETERS], expected_trace_set_parameters)\n except Exception as e:\n self.fail('Exception occurred: ' + str(e))",
"def predict_collect(self, src, collector) -> None:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether we are in dev mode or staging mode; this is used to pull in the correct settings overrides.
|
import os

def env_mode():
    # Return 'DEV' or 'STAGING' depending on which environment flag is set,
    # or None when neither override applies.
    if os.environ.get('DEV_MODE') is not None:
        return 'DEV'
    if os.environ.get('STAGING_MODE') is not None:
        return 'STAGING'
    return None
|
[
"def is_dev_env() -> bool:\n if os.getenv(\"APP_ENV\") == \"dev\":\n return True\n return False",
"def is_dev():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')",
"def is_production() -> bool:\n\n return conf(\"app.mode\") == \"prod\"",
"def is_dev():\n\treturn os.environ['SERVER_SOFTWARE'].startswith('Dev')",
"def _is_local():\n return (bool(os.getenv('LOCAL_DEVELOPMENT')) or\n os.getenv('SERVER_SOFTWARE', '').startswith('Development/'))",
"def is_local_dev_server():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')",
"def is_development():\n name = os.environ.get('SERVER_NAME', '')\n return (\n os.environ.get('SERVER_SOFTWARE', '').startswith('Development')\n or name.startswith('dev-')\n or name.startswith('test')\n or name.startswith('master')\n )",
"def is_local_env():\n # This is set on template-samp.yaml\n logging.info(f\"ENVIRONMENT: {os.getenv('TEST_ENV')}\")\n return os.getenv(\"TEST_ENV\") == \"LOCAL\" or is_local_dynamo()",
"def check_django_invariants():\n\n from django.conf import settings as s\n\n # DEBUG features and permissions mistakes must be forbidden on production boxes\n if 'blitzka-prod' in s.HOSTNAME:\n assert s.PENNY_ENV == 'PROD', 'blitzka-prod must run in ENV=PROD mode'\n assert s.DJANGO_USER == 'www-data', 'Django can only be run as user www-data'\n assert not s.DEBUG, 'DEBUG=True is never allowed on prod and beta!'\n assert not s.ENABLE_DEBUG_TOOLBAR, 'Debug toolbar is never allowed on prod!'\n assert s.DEFAULT_HTTP_PROTOCOL == 'https', 'https is required on prod servers'\n assert s.TIME_ZONE == 'UTC', 'Prod servers must always be set to UTC timezone'\n assert s.REPO_DIR == '/opt/blitzka', 'Repo must be in /opt/blitzka on prod'\n\n # tests can pollute the data dir and use lots of CPU / Memory\n # only disable this check if you're 100% confident it's safe and have a\n # very good reason to run tests on production. remember to try beta first\n assert not s.IS_TESTING, 'Tests should not be run on prod machines'\n\n elif 'blitzka-beta' in s.HOSTNAME:\n assert s.PENNY_ENV == 'BETA', 'blitzka-beta must run in ENV=BETA mode'\n assert s.DJANGO_USER == 'www-data', 'Django can only be run as user www-data'\n assert not s.DEBUG, 'DEBUG=True is never allowed on prod and beta!'\n assert s.DEFAULT_HTTP_PROTOCOL == 'https', 'https is required on prod servers'\n assert s.TIME_ZONE == 'UTC', 'Prod servers must always be set to UTC timezone'\n assert s.REPO_DIR == '/opt/blitzka', 'Repo must be in /opt/blitzka on prod'\n\n # make sure all security-sensitive settings are coming from safe sources\n for setting_name in s.SECURE_SETTINGS:\n defined_in = get_setting_source(s.SETTINGS_SOURCES, setting_name)\n\n if s.PENNY_ENV in ('PROD', 'BETA'):\n assert defined_in in s.SECURE_SETTINGS_SOURCES, (\n 'Security-sensitive settings must only be defined in secrets.env!\\n'\n f' Missing setting: {setting_name} in secrets.env\\n'\n f' Found in: {defined_in}'\n )\n\n if s.PENNY_ENV == 'PROD':\n # make sure settings are not defaults on prod\n assert getattr(s, setting_name) != s._PLACEHOLDER_FOR_UNSET, (\n 'Security-sensitive settings must be defined in secrets.env\\n'\n f' Missing setting: {setting_name} in secrets.env'\n )\n\n # if s.IS_TESTING:\n # assert s.REDIS_DB != s.SETTINGS_DEFAULTS['REDIS_DB'], (\n # 'Tests must be run with a different redis db than the main redis')",
"def dev(self):\r\n try:\r\n dev = self.get('dev')\r\n if isinstance(dev, bool):\r\n return dev\r\n else:\r\n return False\r\n except KeyError, e:\r\n return False",
"def isProdHost():\n\n return _Control.TIER.name == \"PROD\"",
"def _is_running_on_app_engine():\n return os.getenv('GAE_ENV') or (\n os.getenv('SERVER_SOFTWARE') and\n (os.getenv('SERVER_SOFTWARE').startswith('Development/') or\n os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')))",
"def isDebug():\n return isLocal() or getMelangeVersion() == 'devvin'",
"def local_run():\n server_software = os.environ.get('SERVER_SOFTWARE')\n if server_software is None:\n return True\n if 'remote_api' in server_software:\n return False\n if server_software.startswith(('Development', 'testutil')):\n return True\n return False",
"def _check_dev_mode():\n import sys\n if \"-dev\" in sys.argv:\n return os.path.pardir\n else:\n return os.path.curdir",
"def testing():\n return getattr(settings, 'TESTING', False)",
"def get_environment():\n if os.environ.get('SERVER_NAME', '').startswith('production'):\n return 'production'\n elif os.environ.get('SERVER_NAME', '').startswith('staging'):\n return 'staging'\n elif os.environ.get('SERVER_NAME', '').startswith('v') and os.environ.get(\n 'SERVER_NAME', ''\n ).endswith('appspot.com'):\n return 'testing'\n elif os.environ.get('SERVER_NAME', '').startswith('test'):\n return 'test'\n return 'development'",
"def load_presets_dev(self):\n presets_dev = requests.get(self.preset_dev_endpoint).json()\n settings_dev = requests.get(self.settings_dev_endpoint).json()\n return {\n key: {\n 'full_name': value['fullName'],\n 'settings': settings_dev.get(value['fullName']),\n }\n for key, value in presets_dev.items()\n if value['fullName'] in settings_dev\n }",
"def test_dev(self):\r\n dev = Config.dev()\r\n self.assertIsInstance(dev, bool)\r\n \r\n Config.data['dev'] = 'True'\r\n dev = Config.dev()\r\n self.assertFalse(dev)\r\n \r\n Config.data['dev'] = True\r\n dev = Config.dev()\r\n self.assertTrue(dev)\r\n \r\n Config.data['dev'] = 'Yes'\r\n dev = Config.dev()\r\n self.assertFalse(dev)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that amity creates rooms that are either offices or living spaces
|
def test_create_room(self):
self.assertIn("created successfully", self.amity.create_room(["earth"], "office"))
|
[
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_can_add_livingspace(self):\r\n self.amity.add_room(\"Troupon\", 'livingspaces')\r\n self.assertIn('Troupon', self.amity.rooms['livingspaces'])",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_for_room_avaialble(self):\n\t\tself.assertIs(self.office.is_filled(),False)",
"def test_amenities_belongs_to_room(self):\n user2 = sample_user(\n email='diffuser@diff.com', \n password='diffuserpassword')\n room = sample_room(user=user2, name='Different room')\n room.amenities.add(sample_aminitie(name='Tv'))\n \n other_room = sample_room(user=self.user, name=\"palace room\")\n other_room.amenities.add(sample_aminitie(name='Internet'))\n\n res = self.client.get(AMENITY_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], room.name)",
"def test_is_empty_room(self):\n self.assertTrue(self.gamerules.is_empty_room(\"Dining\")) # Room can be occupided by more than 1 person\n self.assertTrue(self.gamerules.is_empty_room(\"Study-Library\")) #Empty Hallway, is empty\n self.assertFalse(self.gamerules.is_empty_room(\"Study-Hall\")) #Occupied Hallway",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_api_rooms_create_authenticated(self):\n user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 201)\n room = Room.objects.get()\n self.assertEqual(room.name, \"my room\")\n self.assertEqual(room.slug, \"my-room\")\n self.assertTrue(room.accesses.filter(role=\"owner\", user=user).exists())",
"def test_get_rooms(self):\n self.board.get_rooms",
"def test_room_create(self):\n type(self).room = Room()",
"def test_some_rooms_are_booked_but_all_room_type_are_available(self):\n blocked_day_room121 = BlockedDayFactory(date=datetime.datetime(2021, 4, 28), hotel_room=self.hotelroom121)\n \n params = {\n 'max_price':'800',\n 'check_in':'2021-04-27',\n 'check_out':'2021-04-29'\n }\n\n response = self.client.get(reverse('units'), params)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data[0]['title'], self.hotel_lux.title)\n self.assertEqual(response.data[1]['title'], self.apartment.title)",
"def test_process_room_only_runs(self):\n # Get all the rooms that the game should recognize.\n data_dir = os.path.abspath('data')\n rooms_full_path = os.path.join(data_dir, ROOMS_FILENAME)\n with open(rooms_full_path, \"r\") as rooms_file:\n rooms_dict_str = rooms_file.read()\n rooms_dict = json.loads(rooms_dict_str)\n # Add the cardinal directions to the rooms dict\n rooms_dict[\"north\"] = \"north\"\n rooms_dict[\"east\"] = \"east\"\n rooms_dict[\"south\"] = \"south\"\n rooms_dict[\"west\"] = \"west\"\n for room in rooms_dict:\n print \"TESTING COMMAND: \" + room\n processed_command = parser.parse_command(room)\n output_type = processed_command[\"type\"]\n title = None\n action = None\n top_level = [\"item\", \"room\", \"feature\"]\n for word in top_level:\n if word in processed_command['command']:\n title = processed_command['command'][word]\n if \"action\" in processed_command['command']:\n action = processed_command['command']['action']\n res = self.game.process_parsed_command(output_type, title, action)\n if res:\n self.game.post_process(res)",
"def test_3_Room0(self):\n l_xml = self.m_xml.room\n # print(PrettyFormatAny.form(self.m_xml.room, 'Room'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_ROOM_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_ROOM_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_ROOM_ACTIVE_0)\n self.assertEqual(l_xml.find('UUID').text, TESTING_ROOM_UUID_0)\n self.assertEqual(l_xml.find('Comment').text, TESTING_ROOM_COMMENT_0)\n self.assertEqual(l_xml.find('Corner').text, TESTING_ROOM_CORNER_0)\n self.assertEqual(l_xml.find('Floor').text, TESTING_ROOM_FLOOR_0)\n self.assertEqual(l_xml.find('LastUpdate').text, str(TESTING_ROOM_LAST_UPDATE_0))\n self.assertEqual(l_xml.find('Size').text, TESTING_ROOM_SIZE_0)\n self.assertEqual(l_xml.find('RoomType').text, TESTING_ROOM_TYPE_0)",
"def test_all_doublerooms_are_booked(self):\n blocked_day_room121 = BlockedDayFactory(date=datetime.datetime(2021, 4, 28), hotel_room=self.hotelroom121)\n blocked_day_room122 = BlockedDayFactory(date=datetime.datetime(2021, 4, 28), hotel_room=self.hotelroom122)\n \n params = {\n 'max_price':'800',\n 'check_in':'2021-04-27',\n 'check_out':'2021-04-29'\n }\n\n response = self.client.get(reverse('units'), params)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data[0]['price'], float(self.bookinginfo_tripleroom.price))\n self.assertEqual(response.data[1]['price'], float(self.bookinginfo_apt.price))",
"def test_create_game_room(self):\n url = '/api/gameroom/create/'\n data = {'name': 'test', \"password\": 'test', 'longitude': 1.1, 'latitude': 1.1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(GameRoom.objects.count(), 1)\n self.assertEqual(GameRoom.objects.get(pk=1).name, 'test')",
"def test_register_meeting_room(self):\n\n data = {\n \"name\": \"Test Sala\",\n \"place\": \"Sem lugar\",\n \"description\": \"Sala de reuniao de teste\"\n }\n\n resp = self.client.post(\"/api/meeting-room/\", data=data)\n self.assertEqual(resp.status_code, 201)",
"def test_api_rooms_create_anonymous(self):\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertFalse(Room.objects.exists())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that amity does not create duplicate rooms
|
def test_amity_does_not_create_duplicte_rooms(self):
self.amity.create_room(["void"], "office")
response = self.amity.create_room(["void"], "livingspace")
self.assertEqual(1, len(self.amity.rooms))
|
[
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_api_rooms_create_authenticated_existing_slug(self):\n RoomFactory(name=\"my room\")\n user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"My Room!\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json(), {\"slug\": [\"Room with this Slug already exists.\"]}\n )",
"def test_amenities_belongs_to_room(self):\n user2 = sample_user(\n email='diffuser@diff.com', \n password='diffuserpassword')\n room = sample_room(user=user2, name='Different room')\n room.amenities.add(sample_aminitie(name='Tv'))\n \n other_room = sample_room(user=self.user, name=\"palace room\")\n other_room.amenities.add(sample_aminitie(name='Internet'))\n\n res = self.client.get(AMENITY_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], room.name)",
"def test_room_exits(self):\n assert not self.room.get_exits()\n another_room = Room()\n another_room.x, another_room.y, another_room.z = (5, 5, 6)\n assert self.room.get_exits() == {\"up\": another_room}\n del Room._caches[\"uid\"][another_room.uid]\n del another_room\n gc.collect()\n assert not self.room.get_exits()",
"def test_api_rooms_create_anonymous(self):\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertFalse(Room.objects.exists())",
"def test_can_add_livingspace(self):\r\n self.amity.add_room(\"Troupon\", 'livingspaces')\r\n self.assertIn('Troupon', self.amity.rooms['livingspaces'])",
"def test_create_meeting_duplicate(self):\n\n meeting = Meeting.objects.all()[0]\n\n data = {\n \"name\": \"Reunião de Test Invalid\",\n \"meeting_room\": self.meeting_room.id,\n \"start\": meeting.start,\n \"end\": meeting.end\n }\n\n resp = self.client.post(\"/api/meeting/\", data=data)\n self.assertEqual(resp.status_code, 400)\n\n data = resp.json()\n self.assertIn(\"Esta sala ja esta reservada para esse horario\", data['non_field_errors'])",
"def test_room_create(self):\n type(self).room = Room()",
"def test_api_rooms_create_authenticated(self):\n user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 201)\n room = Room.objects.get()\n self.assertEqual(room.name, \"my room\")\n self.assertEqual(room.slug, \"my-room\")\n self.assertTrue(room.accesses.filter(role=\"owner\", user=user).exists())",
"def test_is_empty_room(self):\n self.assertTrue(self.gamerules.is_empty_room(\"Dining\")) # Room can be occupided by more than 1 person\n self.assertTrue(self.gamerules.is_empty_room(\"Study-Library\")) #Empty Hallway, is empty\n self.assertFalse(self.gamerules.is_empty_room(\"Study-Hall\")) #Occupied Hallway",
"def test_for_room_avaialble(self):\n\t\tself.assertIs(self.office.is_filled(),False)",
"def test_resolve_rooms_id_all_at_once(self):\n\n floor_0 = self.db_building[\"dxf\"][\"floors\"][0]\n floor_1 = self.db_building[\"dxf\"][\"floors\"][1]\n room_00 = floor_0[\"unidentified_rooms\"][0]\n room_02 = floor_0[\"unidentified_rooms\"][2]\n room_10 = floor_1[\"unidentified_rooms\"][0]\n room_12 = floor_1[\"unidentified_rooms\"][2]\n\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n None,\n None\n )\n\n self.assertEqual(floor_1[\"rooms\"][\"R023\"], self.final_rooms[\"R023\"])\n self.assertEqual(floor_1[\"rooms\"][\"R022\"], self.final_rooms[\"R022\"])\n self.assertEqual(floor_0[\"rooms\"][\"R002\"], self.final_rooms[\"R002\"])\n self.assertEqual(floor_0[\"rooms\"][\"R003\"], self.final_rooms[\"R003\"])\n self.assertTrue(room_00 not in floor_0[\"unidentified_rooms\"])\n self.assertTrue(room_02 not in floor_0[\"unidentified_rooms\"])\n self.assertTrue(room_10 not in floor_1[\"unidentified_rooms\"])\n self.assertTrue(room_12 not in floor_1[\"unidentified_rooms\"])",
"def test_duplicate_creation(self):\n valid_regions = [\n {'name': 'First Region', 'shard': self.first_shard},\n {'name': 'First Region', 'shard': self.second_shard},\n {'name': 'Second Region', 'shard': self.first_shard},\n ]\n\n regions = []\n for region in valid_regions:\n regions.append(Region.objects.create(name=region['name'], shard=region['shard']))\n\n for region in valid_regions:\n with self.assertRaises(IntegrityError):\n Region.objects.create(name=region['name'], shard=region['shard'])\n\n self.assertSequenceEqual(Region.objects.all(), regions)",
"def test_create_game_room(self):\n url = '/api/gameroom/create/'\n data = {'name': 'test', \"password\": 'test', 'longitude': 1.1, 'latitude': 1.1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(GameRoom.objects.count(), 1)\n self.assertEqual(GameRoom.objects.get(pk=1).name, 'test')",
"def test_add_delete_reading_rooms(self):\n\n census = Office.objects.get(\n slug='department-of-commerce--census-bureau')\n all_rooms = census.reading_room_urls.all().count()\n self.assertEqual(0, all_rooms)\n\n data = {\n 'reading_rooms': [\n ['Url One', 'http://urlone.gov'],\n ['Url Two', 'http://urltwo.gov']]}\n update_reading_rooms(census, data)\n all_rooms = census.reading_room_urls.all()\n self.assertEqual(2, len(all_rooms))\n\n data = {\n 'reading_rooms': [\n ['Url One', 'http://urlone.gov'],\n ['Url Three', 'http://urlthree.gov']]}\n update_reading_rooms(census, data)\n rr_count = census.reading_room_urls.all().count()\n self.assertEqual(2, rr_count)",
"def test_create_duplicated_area(self):\n # create our user so we can authenticate \n res = self.ph.create_user(self.test_user_name, self.test_user_password)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED, res.get_data(as_text=True))\n\n # create a new area and assert the respose values\n new_area_name = 'New Information'\n post_res = self.ph.create_area(new_area_name)\n self.assertEqual(post_res.status_code, status.HTTP_201_CREATED, post_res.get_data(as_text=True))\n self.assertEqual(Area.query.count(), 1)\n post_res_data = json.loads(post_res.get_data(as_text=True))\n self.assertEqual(post_res_data['name'], new_area_name)\n\n # try to assert it again, and assert the status code is an http 400\n second_post_res = self.ph.create_area(new_area_name)\n self.assertEqual(second_post_res.status_code, status.HTTP_400_BAD_REQUEST, \"The insertion of a duplicate area didn't return a 400 code\")\n self.assertEqual(Area.query.count(), 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that amity can add a person to the amity system
|
def test_add_person(self):
role = "FELLOW"
name = "SAKAZUKI AKAINO"
accommodate = "Y"
response = self.amity.add_person(role, name, accommodate)
self.assertIn("has been added successfully to the system", response)
|
[
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def test_for_add_people(self):\n\t\tperson = Fellow(\"Abiodun\")\n\t\tself.office.add_person(person)\n\t\tself.assertGreater(len(self.office.people),0)",
"def test_unallocated_person(self):\n\n response = self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.assertIn(\"unallocated\", response)",
"def test_member_add_regression(self):\n self.assertEqual(self.organization.members.count(), 1)\n self.add_member(username='tester')\n self.assertEqual(self.organization.members.count(), 1)\n self.assertEqual(self.organization.owners.count(), 1)",
"def test_owner_add(self):\n self.assertEqual(self.organization.owners.count(), 1)\n self.add_owner(username='tester')\n self.assertEqual(self.organization.owners.count(), 2)",
"def test_update_person(self):\n pass",
"def test_add_person(self):\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.summary = \"Test Summary\"\n\t\tdraft.target_people.add(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1)\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, direct_incoming=1, total_incoming=1, starred_public=1)",
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_owner_add(self):\n self.assertEqual(self.organization.owners.count(), 1)\n self.add_owner(username=\"tester\")\n self.assertEqual(self.organization.owners.count(), 1)\n invitation = Invitation.objects.for_object(self.organization).get()\n self.assertEqual(invitation.from_user, self.owner)\n self.assertEqual(invitation.to_user, User.objects.get(username=\"tester\"))",
"def test_add_agents():\n\n model = ap.Model()\n model.add_agents(3)\n\n assert len(model.agents) == 3 # New agents\n assert list(model.agents.id) == [1, 2, 3]\n\n model.add_agents(model.agents) # Existing agents\n assert list(model.agents.id) == [1, 2, 3] * 2",
"def test_add_actor(self):\n actor_name = \"test_actor\"\n self.api.add_actor(name=actor_name, tags=['asd'])\n actor=self.api.entity_search(name=actor_name)\n self.assertEqual(actor[0]['name'], actor_name)",
"def test_skill_creation(self):\n self.assertEqual(self.skill1.name, 'Skill1')\n self.assertTrue(self.skill1.users.all())",
"def test_add_card(self) -> None:\r\n self.localisation.apply_user_change(5, self.user)\r\n ownership = self.localisation.ownerships.get(owner=self.user)\r\n self.assertEqual(ownership.count, 5)",
"def test_add_population_callable(self):\n self.biosim.add_population(self.population)",
"def sample_aminitie(name):\n return Amenity.objects.create(name=name)",
"def test_add_participant(self):\n management.call_command(\n 'add_participant', email='user1@test.com', competition=self.competitions[0])\n p = CompetitionParticipant.objects.get(\n user__email='user1@test.com', competition_id=self.competitions[0])\n assert(p.competition_id == self.competitions[0])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that when a person is added to the system the person is allocated a room
|
def test_add_person_allocates_rooms(self):
self.amity.create_room(["mars"], "office")
self.amity.create_room(["earth"], "livingspace")
response = self.amity.add_person("fellow", "monkey luffy", "y")
self.assertIn("successfully", response)
|
[
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_room_create(self):\n type(self).room = Room()",
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def test_can_add_livingspace(self):\r\n self.amity.add_room(\"Troupon\", 'livingspaces')\r\n self.assertIn('Troupon', self.amity.rooms['livingspaces'])",
"def test_amenities_belongs_to_room(self):\n user2 = sample_user(\n email='diffuser@diff.com', \n password='diffuserpassword')\n room = sample_room(user=user2, name='Different room')\n room.amenities.add(sample_aminitie(name='Tv'))\n \n other_room = sample_room(user=self.user, name=\"palace room\")\n other_room.amenities.add(sample_aminitie(name='Internet'))\n\n res = self.client.get(AMENITY_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], room.name)",
"def test_register_meeting_room(self):\n\n data = {\n \"name\": \"Test Sala\",\n \"place\": \"Sem lugar\",\n \"description\": \"Sala de reuniao de teste\"\n }\n\n resp = self.client.post(\"/api/meeting-room/\", data=data)\n self.assertEqual(resp.status_code, 201)",
"def test_for_add_people(self):\n\t\tperson = Fellow(\"Abiodun\")\n\t\tself.office.add_person(person)\n\t\tself.assertGreater(len(self.office.people),0)",
"def test_add_person(self):\n role = \"FELLOW\"\n name = \"SAKAZUKI AKAINO\"\n accommodate = \"Y\"\n response = self.amity.add_person(role, name, accommodate)\n self.assertIn(\"has been added successfully to the system\", response)",
"def test_member_add_regression(self):\n self.assertEqual(self.organization.members.count(), 1)\n self.add_member(username='tester')\n self.assertEqual(self.organization.members.count(), 1)\n self.assertEqual(self.organization.owners.count(), 1)",
"def test_api_rooms_create_authenticated(self):\n user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 201)\n room = Room.objects.get()\n self.assertEqual(room.name, \"my room\")\n self.assertEqual(room.slug, \"my-room\")\n self.assertTrue(room.accesses.filter(role=\"owner\", user=user).exists())",
"def test_owner_add(self):\n self.assertEqual(self.organization.owners.count(), 1)\n self.add_owner(username='tester')\n self.assertEqual(self.organization.owners.count(), 2)",
"def test_reallocate_person(self):\n self.amity.create_room([\"venus\"], \"livingspace\")\n id_no = self.amity.get_person_id(\"Daniel Sumba\")\n response = self.amity.reallocate_person(id_no, \"venus\")\n self.assertIn(\"has been successfully moved\", response)",
"def test_create_game_room(self):\n url = '/api/gameroom/create/'\n data = {'name': 'test', \"password\": 'test', 'longitude': 1.1, 'latitude': 1.1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(GameRoom.objects.count(), 1)\n self.assertEqual(GameRoom.objects.get(pk=1).name, 'test')",
"def test_owner_add(self):\n self.assertEqual(self.organization.owners.count(), 1)\n self.add_owner(username=\"tester\")\n self.assertEqual(self.organization.owners.count(), 1)\n invitation = Invitation.objects.for_object(self.organization).get()\n self.assertEqual(invitation.from_user, self.owner)\n self.assertEqual(invitation.to_user, User.objects.get(username=\"tester\"))",
"def test_register_meeting(self):\n\n data = {\n \"name\": \"Reunião de Test\",\n \"meeting_room\": self.meeting_room.id,\n \"start\": self.start,\n \"end\": self.end\n }\n\n resp = self.client.post(\"/api/meeting/\", data=data)\n self.assertEqual(resp.status_code, 201)",
"def test_api_rooms_create_anonymous(self):\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertFalse(Room.objects.exists())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that a staff member cannot be allocated a living space
|
def test_add_person_staff_cannot_be_allocated_livingspace(self):
self.amity.create_room(["pluto"], "livingspace")
response = self.amity.add_person("staff", "Sakazuki Akainu", "Y")
self.assertIn("staff can not be allocated accommodation", response)
|
[
"def test_unallocated_person(self):\n\n response = self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.assertIn(\"unallocated\", response)",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_list_of_unallocated_people(self):\r\n self.assertIsNotNone(self.amity.get_a_list_of_unallocated_people())",
"def test_no_illegal_withdraw_(self):\n task = TaskFactory.create(people_needed=2, time_needed=8)\n task.author = BlueBottleUserFactory.create()\n task.save()\n\n another_user = BlueBottleUserFactory.create()\n task_member = TaskMemberFactory.create(member=another_user, task=task)\n\n self.assertEquals(task.members.count(), 1)\n self.visit_path('/tasks/{0}'.format(task.id))\n\n self.assertFalse(self.scroll_to_and_click_by_css('.withdraw'))",
"def test_allocate_student_missing_user(allocation_class, random_value, course):\n assert allocation_class.allocate(random_value, course) == False",
"def test_move_leadership_no_dest(self):\n assert not self.move_leadership_valid(4, 1)",
"def test_disallow_fortified_invasion(self):\n londo = self.get_region(\"Orange Londo\")\n londo.owner = None\n londo.buff_with(db.Buff.fortified())\n\n when = now() + 60 * 60 * 24\n\n with self.assertRaises(db.TimingException):\n londo.invade(self.alice, when)\n\n n = (self.sess.query(db.Battle).count())\n self.assertEqual(n, 0)",
"def test_allocate_student_missing_course(allocation_class, created_user, random_value):\n assert allocation_class.allocate(created_user, random_value) == False",
"def test_non_staff(self):\n self._verify_non_staff_cannot_access(views.discussion_topics, \"GET\", [str(self.course.id)])",
"def test_external_contractor_cannot_see_students_outside_of_their_group(self):\n another_group = Group.objects.get(pk=2)\n\n self.assertFalse(\n can_user_view_students_list_for_group(self.teacher_user, another_group))",
"def test_fail_no_membership(self, no_membership_context):\n _, channel = no_membership_context\n\n @gate(role=Role.MEMBER)\n def fn(cid):\n return \"ok\"\n\n with pytest.raises(api_res.AccessError):\n fn(cid=channel.id)",
"def test_nonexistent_member(event):\n admin, event_id = event\n expect_error(invite_user, InputError, admin.username, \"aaa\", event_id)",
"def testMentorDeniedAccess(self):\n # seed a profile who is a mentor\n org = org_utils.seedOrganization(self.program.key())\n profile_utils.seedNDBProfile(\n self.program.key(), user=self.user, mentor_for=[org.key])\n\n access_checker = access.ProgramAdministratorAccessChecker()\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.message,\n access._MESSAGE_NOT_PROGRAM_ADMINISTRATOR)",
"def test_ensure_dm_nondm(self):\n msg = {'friends': []}\n self.assertRaises(ValueError, self.messagetools.ensure_dm, msg)",
"def test_block_creation_with_non_existent_office(self):\n CommonTestCases.admin_token_assert_in(\n self,\n create_block_with_non_existing_office,\n \"Office not found\"\n )",
"def _is_legal_allocation(self, servers_mem, services_mem):\n return np.sum(servers_mem) >= np.sum(services_mem)",
"def before_taking_check_not_inside(actor, x, ctxt) :\n loc = ctxt.world[Location(actor)]\n while not ctxt.world[IsA(loc, \"room\")] :\n if loc == x :\n if ctxt.world[IsA(x, \"container\")] :\n raise AbortAction(str_with_objs(\"{Bob|cap}'d have to get out of [the $x] first.\", x=x), actor=actor)\n elif ctxt.world[IsA(x, \"supporter\")] :\n raise AbortAction(str_with_objs(\"{Bob|cap}'d have to get off [the $x] first.\", x=x), actor=actor)\n else :\n raise Exception(\"Unknown object location type.\")\n loc = ctxt.world[Location(loc)]",
"def test_get_is_member_for_nonmember(self):\n raise NotImplementedError",
"def test_disallow_invadeception(self):\n londo = self.get_region(\"Orange Londo\")\n # For testing purposes, londo is now neutral\n londo.owner = None\n\n now = time.mktime(time.localtime())\n when = now + 60 * 60 * 24\n battle = londo.invade(self.alice, when)\n\n self.assert_(battle)\n\n with self.assertRaises(db.InProgressException):\n londo.invade(self.alice, when)\n\n n = (self.sess.query(db.Battle).count())\n self.assertEqual(n, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that amity does not add people to full rooms
|
def test_add_person_cannot_allocate_person_to_a_full_room(self):
self.amity.create_room(["jupiter"], "office")
self.amity.add_person("staff", "Monkey Garp")
self.amity.add_person("staff", "Kuzan Aokiji")
self.amity.add_person("staff", "Bosalino Kizaru")
self.amity.add_person("staff", "Monkey Dragon")
self.amity.add_person("staff", "Sakazuki Akainu")
self.amity.add_person("staff", "shem ogumbe")
response = self.amity.add_person("staff", "nico robin")
self.assertIn("unallocated", response)
|
[
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def test_for_room_avaialble(self):\n\t\tself.assertIs(self.office.is_filled(),False)",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_is_empty_room(self):\n self.assertTrue(self.gamerules.is_empty_room(\"Dining\")) # Room can be occupided by more than 1 person\n self.assertTrue(self.gamerules.is_empty_room(\"Study-Library\")) #Empty Hallway, is empty\n self.assertFalse(self.gamerules.is_empty_room(\"Study-Hall\")) #Occupied Hallway",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_can_add_livingspace(self):\r\n self.amity.add_room(\"Troupon\", 'livingspaces')\r\n self.assertIn('Troupon', self.amity.rooms['livingspaces'])",
"def test_api_rooms_create_anonymous(self):\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertFalse(Room.objects.exists())",
"def test_list_of_unallocated_people(self):\r\n self.assertIsNotNone(self.amity.get_a_list_of_unallocated_people())",
"def test_amenities_belongs_to_room(self):\n user2 = sample_user(\n email='diffuser@diff.com', \n password='diffuserpassword')\n room = sample_room(user=user2, name='Different room')\n room.amenities.add(sample_aminitie(name='Tv'))\n \n other_room = sample_room(user=self.user, name=\"palace room\")\n other_room.amenities.add(sample_aminitie(name='Internet'))\n\n res = self.client.get(AMENITY_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], room.name)",
"def test_unallocated_person(self):\n\n response = self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.assertIn(\"unallocated\", response)",
"def test_api_rooms_list_anonymous(self):\n RoomFactory(is_public=False)\n RoomFactory(is_public=True)\n\n response = self.client.get(\"/api/rooms/\")\n self.assertEqual(response.status_code, 200)\n\n results = response.json()[\"results\"]\n self.assertEqual(len(results), 0)",
"def test_get_rooms(self):\n self.board.get_rooms",
"def test_some_rooms_are_booked_but_all_room_type_are_available(self):\n blocked_day_room121 = BlockedDayFactory(date=datetime.datetime(2021, 4, 28), hotel_room=self.hotelroom121)\n \n params = {\n 'max_price':'800',\n 'check_in':'2021-04-27',\n 'check_out':'2021-04-29'\n }\n\n response = self.client.get(reverse('units'), params)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data[0]['title'], self.hotel_lux.title)\n self.assertEqual(response.data[1]['title'], self.apartment.title)",
"def test_room_exits(self):\n assert not self.room.get_exits()\n another_room = Room()\n another_room.x, another_room.y, another_room.z = (5, 5, 6)\n assert self.room.get_exits() == {\"up\": another_room}\n del Room._caches[\"uid\"][another_room.uid]\n del another_room\n gc.collect()\n assert not self.room.get_exits()",
"def test_can_prepopulate(self):\r\n self.amity.pre_populate()\r\n self.assertEquals('Kiln', self.amity.rooms['offices'][0].name)\r\n self.assertEquals(len(self.amity.rooms['offices']), 10)\r\n self.assertEquals('Carat', self.amity.rooms['livingspaces'][0].name)\r\n self.assertEquals(len(self.amity.rooms['livingspaces']), 10)",
"def test_api_rooms_create_authenticated(self):\n user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/rooms/\",\n {\n \"name\": \"my room\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 201)\n room = Room.objects.get()\n self.assertEqual(room.name, \"my room\")\n self.assertEqual(room.slug, \"my-room\")\n self.assertTrue(room.accesses.filter(role=\"owner\", user=user).exists())",
"def test_resolve_rooms_id_all_at_once(self):\n\n floor_0 = self.db_building[\"dxf\"][\"floors\"][0]\n floor_1 = self.db_building[\"dxf\"][\"floors\"][1]\n room_00 = floor_0[\"unidentified_rooms\"][0]\n room_02 = floor_0[\"unidentified_rooms\"][2]\n room_10 = floor_1[\"unidentified_rooms\"][0]\n room_12 = floor_1[\"unidentified_rooms\"][2]\n\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n None,\n None\n )\n\n self.assertEqual(floor_1[\"rooms\"][\"R023\"], self.final_rooms[\"R023\"])\n self.assertEqual(floor_1[\"rooms\"][\"R022\"], self.final_rooms[\"R022\"])\n self.assertEqual(floor_0[\"rooms\"][\"R002\"], self.final_rooms[\"R002\"])\n self.assertEqual(floor_0[\"rooms\"][\"R003\"], self.final_rooms[\"R003\"])\n self.assertTrue(room_00 not in floor_0[\"unidentified_rooms\"])\n self.assertTrue(room_02 not in floor_0[\"unidentified_rooms\"])\n self.assertTrue(room_10 not in floor_1[\"unidentified_rooms\"])\n self.assertTrue(room_12 not in floor_1[\"unidentified_rooms\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that people who have not been allocated space are recorded as unallocated
|
def test_unallocated_person(self):
response = self.amity.add_person("staff", "Kuzan Aokiji")
self.assertIn("unallocated", response)
|
[
"def test_list_of_unallocated_people(self):\r\n self.assertIsNotNone(self.amity.get_a_list_of_unallocated_people())",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def IsAllocated(self) -> bool:",
"def test_allocate_student_missing_user(allocation_class, random_value, course):\n assert allocation_class.allocate(random_value, course) == False",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def _is_legal_allocation(self, servers_mem, services_mem):\n return np.sum(servers_mem) >= np.sum(services_mem)",
"def test_memoryForPID(self):\n\n memory = memoryForPID(os.getpid())\n self.assertNotEqual(memory, 0)",
"def test_already_allocated(self):\n ad_rep = AD_REP_FACTORY.create_ad_rep()\n order = ORDER_FACTORY.create_order()\n ad_rep_order = AdRepOrder.objects.create(ad_rep=ad_rep, order=order)\n BonusPoolAllocation.objects.create(ad_rep_order=ad_rep_order,\n ad_rep=ad_rep, consumer_points=1, amount=Decimal('1'))\n ALLOCATE_BONUS_POOL.run(ad_rep_order.id)\n self.assertEqual(BonusPoolAllocation.objects.filter(\n ad_rep_order=ad_rep_order).count(), 1)",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_memory_consumption_while_recreating_state_db():\n # TODO:\n # p = psutil.Process()\n # print(p.memory_info_ex())",
"def __check_space():\n\n global MIN_SPACE, dbx\n\n # Checks if there is a considerable amount of available space in the user account (at least 500MB)\n allocated_space = dbx.users_get_space_usage().allocation.get_individual().allocated # Allocated space\n used_space = dbx.users_get_space_usage().used # Used space\n available_space = allocated_space - used_space # Available space\n\n # Notifies the user that the space may be insufficient (< 500 MB)\n if available_space < MIN_SPACE:\n print(Fore.YELLOW + \" Warning!\" + Fore.RESET + \" The available space may be insufficient (500 MB). \"\n \"It is advisable to increase it before continuing the execution because an error could occur later.\")\n\n time.sleep(2)",
"def test_allocate_student_missing_course(allocation_class, created_user, random_value):\n assert allocation_class.allocate(created_user, random_value) == False",
"def test_room_exits(self):\n assert not self.room.get_exits()\n another_room = Room()\n another_room.x, another_room.y, another_room.z = (5, 5, 6)\n assert self.room.get_exits() == {\"up\": another_room}\n del Room._caches[\"uid\"][another_room.uid]\n del another_room\n gc.collect()\n assert not self.room.get_exits()",
"def hasAssignedStorage(self) -> bool:\n ...",
"def IsAllocated(self):\n if self._stat_object is None:\n self._stat_object = self._GetStat()\n return self._stat_object and self._stat_object.is_allocated",
"def test_is_out_of_cpu_memory(self) -> None:\n cpu_oom_error = RuntimeError(\"DefaultCPUAllocator: can't allocate memory\")\n self.assertTrue(is_out_of_cpu_memory(cpu_oom_error))\n not_cpu_oom_error = RuntimeError(\"RuntimeError: blah\")\n self.assertFalse(is_out_of_cpu_memory(not_cpu_oom_error))",
"def testLeaks(self):\n if dafBase:\n gc.collect()\n global memId0, nleakPrintMax\n nleak = dafBase.Citizen.census(0, memId0)\n if nleak != 0:\n plural = \"s\" if nleak != 1 else \"\"\n print(\"\\n%d Object%s leaked:\" % (nleak, plural))\n\n if nleak <= nleakPrintMax:\n print(dafBase.Citizen.census(memId0))\n else:\n census = dafBase.Citizen.census()\n print(\"...\")\n for i in range(nleakPrintMax - 1, -1, -1):\n print(census[i].repr())\n\n self.fail(\"Leaked %d block%s\" % (nleak, plural))",
"def test_owner_checking_success(self):\r\n self.assertEqual(hw.Garage.owner_checking(self.change_owner_number[0]), None)\r\n self.assertEqual(hw.Garage.owner_checking(self.change_owner_number[1]), self.change_owner_number[1])",
"def test_owner_no_ownership(self):\n self.assert_ownership(True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that amity can reallocate people to other rooms
|
def test_reallocate_person(self):
self.amity.create_room(["venus"], "livingspace")
id_no = self.amity.get_person_id("Daniel Sumba")
response = self.amity.reallocate_person(id_no, "venus")
self.assertIn("has been successfully moved", response)
|
[
"def do_reallocate_person(self, args):\n # try:\n if amity.validate_email(args['<email>']) == \"Invalid\":\n error_msg = args['<email>'] + \" is not a valid email.\"\n click.secho(error_msg, fg='red')\n return\n if amity.validate_room_name(args['<new_room>']) == \"Invalid\":\n click.secho(\n args['<new_room>'] + \" is not a valid room name, change and try again!!\", fg='red')\n\n email = args['<email>']\n new_room = args['<new_room>']\n if not email in amity.all_persons:\n click.secho(email + \" not in the system!!\", fg='red')\n return\n else:\n if amity.all_persons[email].person_type == \"Fellow\":\n person = amity.fellows[email]\n if person.office == new_room or person.living_space == new_room:\n click.secho(\n \"Been here all along,kindly let be!\", fg='cyan')\n return\n elif amity.all_persons[email].person_type == \"Staff\":\n person = amity.staff[email]\n if person.office == new_room:\n\n click.secho(\n \"Been here all along,kindly let be\", fg='cyan')\n return\n else:\n click.secho(\"An alien type discovered!!\", fg='red')\n return\n\n state = amity.reallocate_person(\n amity.all_persons[args['<email>']], args['<new_room>'])\n if not state == -1:\n click.secho(\"Room reallocation successfully done\", fg='green')\n # except:\n\n # click.secho(\n #\"An unexpected error occured while running the comand\", fg='red')",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n self.amity.add_person(\"staff\", \"Monkey Dragon\")\n self.amity.add_person(\"staff\", \"Sakazuki Akainu\")\n self.amity.add_person(\"staff\", \"shem ogumbe\")\n response = self.amity.add_person(\"staff\", \"nico robin\")\n self.assertIn(\"unallocated\", response)",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def reallocate_room(self, person_name, room_name):\r\n self.check_str(RoomName=room_name, PersonName=person_name)\r\n #Get the room type\r\n room_type = self.get_room_type(room_name)\r\n room_dict = self.get_dict(room_type.upper())\r\n\r\n if room_dict[room_name].is_room_assignable:\r\n return self.reassign_room(room_type.upper(), room_name, person_name)\r\n\r\n if not room_dict[room_name].is_room_assignable:\r\n return 'The room specified is not assignable'\r\n\r\n return 'The room '+room_name+' does not exist'",
"def reallocate_person(self, person_id, new_room_name):\n new_room_name = new_room_name.lower().strip()\n if person_id in self.persons.keys():\n if new_room_name in self.all_rooms_list:\n #Get Current Persons Room\n current_office_room = self.persons[person_id].proom_name\n #Reallocate Person to New Room\n #Reallocate Fellow to new Living room\n if new_room_name in self.living_space:\n current_living_space = self.persons[person_id].lroom_name\n #Check if staff is being allocated a living space\n if self.persons[person_id].person_type.upper() == 'STAFF':\n print ('Staff are not supposed to be assigned Living space.')\n else:\n #Prevent reallocating Living space to fellows who didnt request living space\n if current_living_space =='':\n print(\"Fellow didnt opt in Livingspace program\")\n #Check if Person is already in the room being reallocated to\n elif current_living_space == new_room_name:\n print('Person already belongs to that room')\n else:\n #Delete Person From Living Room\n\n #Add person to new Living Room\n if (len(self.living_space[new_room_name].members)\n < self.max_number_of_people_in_living_space):\n self.living_space[current_room].members.remove(person_id)\n self.living_space[new_room_name].members.append(person_id)\n self.persons[person_id].lroom_name = new_room_name\n print('Reallocation succesful')\n else:\n print('Rooms full to capcity')\n if new_room_name in self.office:\n if current_office_room == new_room_name:\n print('Person already belongs to that room')\n else:\n #Delete Person from current Office and assign them new Office\n if (len(self.office[new_room_name].members)\n < self.max_number_of_people_in_office):\n self.office[current_office_room].members.remove(person_id)\n self.office[new_room_name].members.append(person_id)\n self.persons[person_id].proom_name = new_room_name\n print('Reallocation succesful')\n else:\n print('Room full to capacity')\n else:\n print('Room Does\\'t Exist')\n else:\n print('Person Doesn\\'t Exist')",
"def reassign_room(self, room_type, room_name, person_name):\r\n #Get room individual is assigned to\r\n returned_value = self.get_room_assigned(room_type.upper(), person_name)\r\n if not returned_value:\r\n return 'Check that the individual exists'\r\n\r\n if not returned_value['room']:\r\n self.assign_individual_room(room_type.upper(), room_name, returned_value['person'])\r\n return True\r\n\r\n self.unallocate_room(room_type.upper(), returned_value['room'], returned_value['person'])\r\n self.assign_individual_room(room_type.upper(), room_name, returned_value['person'])\r\n return True",
"def test_room_exits(self):\n assert not self.room.get_exits()\n another_room = Room()\n another_room.x, another_room.y, another_room.z = (5, 5, 6)\n assert self.room.get_exits() == {\"up\": another_room}\n del Room._caches[\"uid\"][another_room.uid]\n del another_room\n gc.collect()\n assert not self.room.get_exits()",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'offices')\r\n self.assertIn('Codango', self.amity.rooms['offices'])",
"def test_base_HRAdminCanInvalidatePerson(self):\n self.login(user_name='test_hr_admin')\n # Try to acceed the person module through restrictedTraverse\n # This will test the security of the module\n person_module_id = self.portal.getDefaultModuleId(portal_type='Person')\n person_module = self.portal.restrictedTraverse(person_module_id)\n # Create a user\n new_person = person_module.newContent(portal_type=\"Person\",\n first_name='Test', \n last_name='Invalidated Vifib User',\n )\n # open assignment in order to trigger open order creation\n assignment = new_person.newContent(portal_type='Assignment')\n assignment.portal_workflow.doActionFor(assignment, \"open_action\")\n # tic to have open order created and indexed\n self.stepTic()\n new_person.edit(career_role='client', default_email_text=\"test@example.com\")\n new_person.portal_workflow.doActionFor(new_person, \"validate_action\")\n new_person.portal_workflow.doActionFor(new_person, \"invalidate_action\")",
"def test_amenities_belongs_to_room(self):\n user2 = sample_user(\n email='diffuser@diff.com', \n password='diffuserpassword')\n room = sample_room(user=user2, name='Different room')\n room.amenities.add(sample_aminitie(name='Tv'))\n \n other_room = sample_room(user=self.user, name=\"palace room\")\n other_room.amenities.add(sample_aminitie(name='Internet'))\n\n res = self.client.get(AMENITY_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], room.name)",
"def test_add_person_staff_cannot_be_allocated_livingspace(self):\n self.amity.create_room([\"pluto\"], \"livingspace\")\n response = self.amity.add_person(\"staff\", \"Sakazuki Akainu\", \"Y\")\n self.assertIn(\"staff can not be allocated accommodation\", response)",
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_add_delete_reading_rooms(self):\n\n census = Office.objects.get(\n slug='department-of-commerce--census-bureau')\n all_rooms = census.reading_room_urls.all().count()\n self.assertEqual(0, all_rooms)\n\n data = {\n 'reading_rooms': [\n ['Url One', 'http://urlone.gov'],\n ['Url Two', 'http://urltwo.gov']]}\n update_reading_rooms(census, data)\n all_rooms = census.reading_room_urls.all()\n self.assertEqual(2, len(all_rooms))\n\n data = {\n 'reading_rooms': [\n ['Url One', 'http://urlone.gov'],\n ['Url Three', 'http://urlthree.gov']]}\n update_reading_rooms(census, data)\n rr_count = census.reading_room_urls.all().count()\n self.assertEqual(2, rr_count)",
"def test_unallocated_person(self):\n\n response = self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.assertIn(\"unallocated\", response)",
"def test_can_add_livingspace(self):\r\n self.amity.add_room(\"Troupon\", 'livingspaces')\r\n self.assertIn('Troupon', self.amity.rooms['livingspaces'])",
"def test_room_create(self):\n type(self).room = Room()",
"def test_allocations_do_not_exceed_arrivals():\n # the actual allocation does not exceed the arrivals\n arrivals = [10, 10, 10]\n overzealous_schedule = simulate_allocations(arrivals, [1000000, 1000000, 1000000], 0)\n assert overzealous_schedule.schedule[0].total == 10\n assert overzealous_schedule.schedule[1].total == 10\n assert overzealous_schedule.schedule[2].total == 10\n assert len(overzealous_schedule.schedule) == 3\n arrivals = [10, 10, 10]\n exact_schedule = simulate_allocations(arrivals, [10, 10, 10], 0)\n assert exact_schedule.schedule[0].total == 10\n assert exact_schedule.schedule[1].total == 10\n assert exact_schedule.schedule[2].total == 10\n assert len(exact_schedule.schedule) == 3\n\n # for all carryovers, the allocation does not exceed the maximum slots allowed in the allocation schedule\n arrivals = [10, 10, 10]\n schedule_with_carryover = simulate_allocations(arrivals, [8, 8, 8, 8], 0)\n assert schedule_with_carryover.schedule[0].total == 8\n assert schedule_with_carryover.schedule[1].total == 8\n assert schedule_with_carryover.schedule[2].total == 8\n assert schedule_with_carryover.schedule[3].total == 6\n assert len(schedule_with_carryover.schedule) == 4",
"def testOccupiedNode(self):\n\n # Create game\n player_1 = Player(randomAction)\n player_2 = Player(randomAction)\n player_3 = Player(randomAction)\n player_4 = Player(randomAction)\n game_manager = GameManager([player_1, player_2, player_3, player_4])\n game_manager.turn_counter = 7\n\n # Give player 1 enough resources for building settlement\n player_1.resource_cards = [3] * 5\n\n # 1st test another player with settlement on node 5\n game_manager.game_board.nodes[5].settlement = [0, 1, 0, 0]\n self.assertEqual(game_manager.buildSettlement(player_1, 5), False)\n\n # 1st test another player with city on node 7\n game_manager.game_board.nodes[7].city = [0, 1, 0, 0]\n self.assertEqual(game_manager.buildSettlement(player_1, 7), False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that amity can add people from a .txt file.
|
def test_load_people(self):
response = self.amity.load_people("people.txt")
self.assertIn("successfully", response)
|
[
"def test_add_person(self):\n role = \"FELLOW\"\n name = \"SAKAZUKI AKAINO\"\n accommodate = \"Y\"\n response = self.amity.add_person(role, name, accommodate)\n self.assertIn(\"has been added successfully to the system\", response)",
"def load_people(self, filename):\n #Open file to write lines with errors to during load_people\n my_error_file = open('errors' + '.txt','w')\n #Check whether file exists in the application root folder\n if os.path.exists(filename+'.txt') == False:\n return \"File Provided is neither .txt nor does it exist\"\n # if os.stat(filename+'.txt') == 0:\n # return \"File is empty\"\n else:\n #print(\"File is empty\")\n try:\n my_file_read = open(filename + '.txt','r')\n for num, line in enumerate(my_file_read, 1):\n # Remove newline character from the lines in the file\n line = line.strip()\n #Split the line into a list\n list_of_arguments_in_file = line.split()\n #Assemble split line into variables that can be used to call add_person\n # This takes care of cases where a fellow has requested for accomodation\n if len(list_of_arguments_in_file) == 5:\n person_id = list_of_arguments_in_file[0]\n last_name = list_of_arguments_in_file[1]\n first_name = list_of_arguments_in_file[2]\n person_type = list_of_arguments_in_file[3]\n wants_accommodation = list_of_arguments_in_file[4]\n #Add Person to the dojo using values from the line\n self.add_person(person_id, last_name,\n first_name, person_type,\n wants_accommodation)\n #Assemble split line into variables that can be used to call add_person\n # This takes care of cases where a fellow has not requested for accomodation\n elif len(list_of_arguments_in_file) == 4:\n person_id = list_of_arguments_in_file[0]\n last_name = list_of_arguments_in_file[1]\n first_name = list_of_arguments_in_file[2]\n person_type = list_of_arguments_in_file[3]\n #Add Person to the dojo using values from the line\n self.add_person(person_id, last_name,\n first_name, person_type,\n wants_accommodation)\n #Write to errors.txt lines with errors\n else:\n my_error_file.write(line\n +' line number ('+ str(num)+')\\n')\n print('Arguments in this line were incorrect: '\\\n + 'line(' + str(num)+')')\n return \"File read succesfully, lines with errors\"\\\n \" have been written to 'errors.txt'\"\n except:\n return ('File Provided is either not a .txt or it doesnt exist')\n my_error_file.close()",
"def test_read_instructors(self):\n self.assertEqual(self.USNA.read_instructors(\"/Users/nickcohron/Stevens/Software Engineering Tools & Techniques/Code/Software-Engineering-Tools/Tests/Test9.txt\"), None) # file with empty lines\n self.assertRaises(SystemExit, self.USNA.read_students, \"not_there.txt\") # file does not exist",
"def test_correct_file(self):\n contents = \"\\n\".join(self.lines)\n aavso_file = create_inmemory_file(\"data.txt\", contents.encode(\"utf-8\"))\n with self.assert_instance_created(Observation, star=self.star, notes=\"test3\"):\n request = self.factory.post(\n data={\"aavso_file\": aavso_file,}, user=self.user\n )\n request.observer = self.observer\n response = self.view(request)\n self.assert_redirect(response)\n self.assert_message_exists(\n request, messages.SUCCESS, _(\"File uploaded successfully!\")\n )",
"def addStudentsFromFile(self, filename):\n filereader=open(filename)\n lines=filereader.readlines()\n for line in lines[5:]:\n line=line.strip('\\n')\n rollno,name,*hwk=line.split(':')\n #Convert homework into numbers\n marks=[eval(mark) for mark in hwk]\n #create a student\n student=Student(rollno,name)\n #set the marks\n student.setMarks(marks)\n #add to list\n self.addStudent(student)\n #close file\n filereader.close()",
"def add_exercise(name_of_person, exercise_type):\n f = open((name_of_person + \"_exercise.txt\"), \"a+\")\n f.write(exercise_type+str(date_time())+\"\\n\")\n print(\"Exercise type added Successfully\")\n f.close()",
"def Load_In_Contacts(self):\n text_file = open(\"contactbook.txt\", \"r\")\n full_text = text_file.readlines() # reads in ALL the text into a list, split by newline\n text_file.close()\n\n for i in range(len(full_text)):\n split_text = full_text[i].split(\"'\") # Creates a list, seperating the full_text on apostrophes\n name = split_text[0]\n address = split_text[1]\n phoneNumber = split_text[2]\n birthday = split_text[3][0:10]\n\n self.Add_New_Contact(name,address,phoneNumber,birthday)",
"def test_create(self):\n\n # use local file instead of web\n foo = os.path.join(os.getcwd(), *TESTDATAPATH, 'authors.ttl')\n f = AuthorFeeder(foo)\n assert_equals(foo, f.src)",
"def test_for_add_people(self):\n\t\tperson = Fellow(\"Abiodun\")\n\t\tself.office.add_person(person)\n\t\tself.assertGreater(len(self.office.people),0)",
"def main():\r\n textfile = input(\"input filename: \")\r\n list = readStudents(textfile)\r\n placeStudents(list)",
"def add_names_from_text():\n currentPosition = None\n currentGender = None\n\n with open(\"Database/Resources/NameCollection.txt\") as file:\n while True:\n line = file.readline()\n if not line:\n break\n\n if len(line) > 1:\n if line == \"--------------------------\\n\":\n currentPosition = file.readline().replace(\"\\n\", \"\")\n file.readline()\n elif line == \"##########################\\n\":\n currentGender = file.readline().replace(\"\\n\", \"\")\n file.readline()\n elif currentPosition is not None and currentGender is not None:\n add_name_details(line.replace(\"\\n\", \"\"), currentPosition, currentGender)\n\n file.close()",
"def test_read_students(self):\n self.assertEqual(self.USNA.read_students(\"/Users/nickcohron/Stevens/Software Engineering Tools & Techniques/Code/Software-Engineering-Tools/Tests/Test9.txt\"), None) # file with empty lines\n self.assertRaises(SystemExit, self.USNA.read_students, \"not_there.txt\") # file does not exist",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_add_actor(self):\n actor_name = \"test_actor\"\n self.api.add_actor(name=actor_name, tags=['asd'])\n actor=self.api.entity_search(name=actor_name)\n self.assertEqual(actor[0]['name'], actor_name)",
"def add_file(self, filename):\n for line in [line.rstrip().lower() for line in open(filename, errors='ignore').readlines()]:\n self.add_sentence(line)",
"def test_file_output(self):\n donor_list = [[\"James Smith\",91661.25],[\"Robert Jones\",51227.53],[\"John Williams\",41113.42]]\n message = \"Dear {:s},\\n\\\n Thank you for donating ${:,.2f}.\\n\\\n Sincerely,\\n\\\n Your Local Charity\"\n comp_dict = {}\n for item in donor_list:\n comp_dict[item[0]] = message.format(*item)\n for k, v in comp_dict.items():\n with open (k + \".txt\", 'r') as f:\n contents = f.read()\n self.assertEqual(contents, v)",
"def test_member_add_regression(self):\n self.assertEqual(self.organization.members.count(), 1)\n self.add_member(username='tester')\n self.assertEqual(self.organization.members.count(), 1)\n self.assertEqual(self.organization.owners.count(), 1)",
"def add_food(name_of_person, food_item):\n f = open((name_of_person + \"_food.txt\"), \"a+\")\n f.write(food_item+str(date_time())+\"\\n\")\n f.close()\n print(\"Food item added Successfully\")",
"def test_register_intent_intent_file(self):\n self._test_intent_file(SimpleSkill6())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Instantiate a TopicModel object.
|
def __init__(self, model_path, dataset_path):
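# Load a trained gensim LdaModel and the accompanying ArXivDataset from disk, then
# cache the topic count, default numeric topic names, and the per-topic word summaries.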
self.model = LdaModel.load(model_path)
self.dataset = ArXivDataset.load(dataset_path)
self.num_topics = self.model.num_topics
self.topic_names = list(range(self.num_topics))
self.topics = self.model.show_topics(num_topics=self.num_topics)
|
[
"def load_gensim_model(self, gensim_model, corpus, dictionary, dimension_range=[5, 20], n_samples=5, n_initializations=10, custom_params=None):\n\n parameters = {\n \"num_topics\":\n {\"type\": int, \"mode\": \"range\", \"values\": dimension_range}\n }\n\n if custom_params:\n parameters = custom_params\n\n sampling_parameters = self._compute_param_combinations(\n parameters, n_samples)\n\n topic = TopicModel(\n \"gensim\", gensim_model, corpus, dictionary, parameters, sampling_parameters, n_samples, n_initializations, [], [], [], [])\n\n self.models.append(topic)\n\n return self",
"def load_sklearn_model(self, sklearn_model, document_vectors, vectorizer, dimension_range=[5, 20], n_samples=5, n_initializations=10, custom_params=None):\n parameters = {\n \"n_components\":\n {\"type\": int, \"mode\": \"range\", \"values\": dimension_range}\n }\n\n if custom_params:\n parameters = custom_params\n\n sampling_parameters = self._compute_param_combinations(\n parameters, n_samples)\n\n topic = TopicModel(\n \"sklearn\", sklearn_model, document_vectors, vectorizer, parameters, sampling_parameters, n_samples, n_initializations, [], [], [], [])\n\n self.models.append(topic)\n\n return self",
"def _construct_model(self, startprob, transmat, emissionprob, vocabulary):\n try:\n from hmmlearn import hmm\n except ImportError:\n raise get_hmmlearn_import_error()\n self.model_ = hmm.MultinomialHMM(n_components=2)\n if isinstance(startprob, list):\n startprob = np.array(startprob)\n if isinstance(transmat, list):\n transmat = np.array(transmat)\n self.model_.startprob_ = startprob\n self.model_.transmat_ = transmat\n self.model_.emissionprob_ = emissionprob\n self.vocabulary_ = vocabulary\n return self",
"def create_from_model_file(cls, model_fp):\n\t\tkeras_model = load_model(model_fp)\n\t\treturn cls(keras_model)",
"def create_topic():\n # Get the user's id from access token\n uid = get_jwt_identity()\n\n # If no user id, return error\n if not uid:\n return make_response(\n jsonify({'error': 'Could not verify!'}),\n 401,\n {'WWW-Authentication': 'Basic realm=\"Login required!\"'})\n\n # Try to get user from database\n query = User.query.filter_by(public_id=uid)\n\n try:\n user = query.one()\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 401\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Get topic data from request\n data = request.get_json()\n\n # Verify that all required topic data was sent\n topic_columns = ['name', 'profeciency']\n if not any(key in data for key in topic_columns):\n return make_response(jsonify({'error': 'Missing data!'}), 400)\n\n # Create topic object\n topic = Topic(\n name=data['name'],\n profeciency=data['profeciency'])\n\n # Try to add topic to database\n try:\n db.session.add(topic)\n db.session.commit()\n\n # If topic name already in database, return error\n except IntegrityError:\n return jsonify({\n 'error': 'Topic with name already exists'\n }), 400\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialze the topic object and return json response\n topic_schema = TopicSchema()\n output = topic_schema.dump(topic).data\n\n return jsonify({\n 'success': 'Successfully retrieved topic.',\n 'topic': output,\n }), 200",
"def createTopic(request):\n\tif request.method == 'POST':\n\t\ttry:\n\t\t\ttext = request.POST.get(\"text\")\n\t\t\t# start critical section\n\t\t\tmutex_create_topic.acquire()\n\t\t\ttry:\n\t\t\t\tnew_topic_id = topic_list.getCurrentTopicId()\n\t\t\t\ttopic = Topic.create(new_topic_id, text)\n\t\t\t\ttopic_list.addTopic(topic)\n\t\t\tfinally:\n\t\t\t\tmutex_create_topic.release()\n\t\t\t# end critical section\n\t\t\treturn HttpResponse(status=200)\n\t\texcept:\n\t\t\treturn HttpResponse(status=400)\n\treturn HttpResponse(status=400)",
"def __init__(self, value, topics):\n if isinstance(topics, Topic):\n self.__topics = topics\n else:\n self.__topics = Topic(topics)\n self.__value = value\n self.timestamp = time.time()",
"def create_lda(self):\n print(\"Creating LDA topic model from \" + str(len(self.docs)) + \" documents.\")\n num_topics = self.number_of_topics\n chunk_size = int(len(self.docs)/100)\n if chunk_size < 1:\n chunk_size = 1 # small number of sentences\n\n all_tokens = sum(self.docs, [])\n # process our stop words like all our words have been processed\n tokens_stop = []\n for word in get_stop_words('en'):\n tokens_stop.extend(self.to_bow(word))\n\n tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)\n # remove words that appear only once or are stop words\n texts = [[word for word in sentence if word not in tokens_once and word not in tokens_stop] for sentence in self.docs]\n\n # constructing topic model\n dict_lda = corpora.Dictionary(texts)\n mm_corpus = [dict_lda.doc2bow(text) for text in texts]\n self.lda = models.ldamodel.LdaModel(corpus=mm_corpus, id2word=dict_lda, num_topics=num_topics, update_every=1, chunksize=chunk_size, passes=1)\n #topics = lda.print_topics(self.number_of_topics)\n\n # get list of lda topic names\n print(self.FORMAT_LINE)\n # printing each topic\n for topic in self.lda.print_topics(self.number_of_topics):\n print(topic)\n print(self.FORMAT_LINE)\n\n print(\"\\n\")\n print(\"- Begin naming topics -\")\n # naming each topic\n i = 1\n for topic in self.lda.print_topics(self.number_of_topics):\n print(\"\\t(\" + str(i) + \") \"+ topic)\n self.topic_names.append(input(\"> A name for topic (\" + str(i) + \"): \"))\n i += 1\n print(\"Done creating LDA topic model\")",
"def _construct(self, model_config):\n pass",
"def instantiate_model(model_type, model_params, *args):\n m = ModelBuilder(model_params)\n models = {'bibigru':'build_bibigru',\n 'gru_cnn': 'build_gru_cnn',\n 'pooled_gru':'build_pooled_gru',\n 'cnn_gru':'build_cnn_gru',\n 'ngram_cnn':'build_ngram_cnn',\n 'test':'build_test'}\n\n if model_type in models:\n builder_name = models[model_type]\n builder = getattr(m, builder_name)\n return builder(*args)#max_sequence_length, max_nb_words, embedding_dimension, embedding_matrix)\n\n else:\n raise Exception(\"Model %s not implemented\" % model_type)",
"def prepare(topic_model, corpus, dictionary, doc_topic_dist=None, **kwargs):\n opts = fp.merge(_extract_data(topic_model, corpus, dictionary, doc_topic_dist), kwargs)\n return pyLDAvis.prepare(**opts)",
"def __init__(self, model, lexicon):\n if os.path.exists(model):\n self.model = pickle.load(open(model, \"rb\"))\n else:\n self.model = self.create_model(model, lexicon)",
"def make_model(self, data):\n return self.MODEL(**data)",
"def create_sklearn_model(model_info):\n model_name, model_class, model_pars = model_info\n model = model_class(**model_pars)\n return model, model_pars",
"def create_topic():\n print(f'Starting creating topic {broker_config.topic_name}')\n\n topic_name = broker_config.topic_name\n exist_topics = admin_client.list_topics()\n\n if topic_name not in exist_topics:\n topic = NewTopic(\n name=topic_name,\n num_partitions=broker_config.topic_partition_count,\n replication_factor=1,\n )\n admin_client.create_topics(\n new_topics=[topic],\n validate_only=False,\n )\n print(f'Topic `{topic_name}` was created')\n else:\n print(f'Topic `{topic_name}` already exists.')",
"def create_topic(client, project_name, topic):\n topic = client.projects().topics().create(\n name=get_full_topic_name(project_name, topic),\n body={}).execute(num_retries=NUM_RETRIES)\n return topic",
"def construct_topics_df(model, reduced=False):\n if reduced:\n\n topic_words = model.topic_words_reduced\n word_scores = model.topic_word_scores_reduced\n topic_sizes = model.topic_sizes_reduced\n else:\n\n topic_words = model.topic_words\n word_scores = model.topic_word_scores\n topic_sizes = model.topic_sizes.values.tolist()\n\n df_topics = construct_df_topic_words_scores(\n topic_words=topic_words,\n word_scores=word_scores,\n digits=2\n ).iloc[:, 0:10]\n\n df_topics[\"size\"] = topic_sizes\n if reduced:\n df_topics[\"hierarchy\"] = model.topic_hierarchy\n cols = [\"hierarchy\", \"size\"] + df_topics.columns.tolist()[0:10]\n else:\n df_topics[\"topic nr\"] = list(range(0, len(topic_sizes)))\n cols = [\"topic nr\", \"size\"] + df_topics.columns.tolist()[0:10]\n\n return df_topics[cols]",
"def __init__(\n self, task: Optional[ClassyTask] = None, model: Optional[ClassyModel] = None\n ) -> None:\n self.task = task\n if task is None:\n assert model is not None, \"Need to specify a model if task is None\"\n self.model = model\n else:\n assert model is None, \"Cannot pass a model if task is not None\"\n self.model = task.model",
"def _create(cls, model_class, *args, **kwargs):\n return model_class(*args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Predict topics for a piece of text.
|
def predict(self, text):
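# Convert the raw text to a bag-of-words vector via the dataset's transform, query the
# LDA model for per-topic probabilities, and return (topic_name, probability) pairs
# sorted by probability in descending order.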
bow_transformed = self.dataset.transform([text])[0]
topic_predictions = self.model.get_document_topics(bow_transformed)
sorted_predictions = sorted(topic_predictions, key=lambda x: x[1],
reverse=True)
sorted_predictions = [(self.topic_names[topic_idx], prob)
for (topic_idx, prob) in sorted_predictions]
return sorted_predictions
|
[
"def predict(self, text):\n\n if not models:\n self.__init__(self.filename, force_load=True)\n vec = self.tokenize(text)\n print(\"BoW:\")\n print(vec)\n topics = np.array(self.model[vec], dtype=[('topic_id', int), ('confidence', float)])\n topics[::-1].sort(order=\"confidence\")\n # This may seem super weird, but it works and it is actually more efficient\n # see https://stackoverflow.com/questions/26984414/efficiently-sorting-a-numpy-array-in-descending-order\n print(topics)\n return topics",
"def predict(self, text):",
"def predict_topic(self, document):\n if self.lda is None:\n print(\"ERROR in lda_topic_model.predict_topic(): Need to create_lda() before predicting topics.\")\n dict_lda = getattr(self.lda, 'id2word')\n lda_vector = self.lda[dict_lda.doc2bow(self.to_bow(document))]\n return self.topic_names[max(lda_vector, key=lambda item: item[1])[0]]\n #print(max(lda_vector, key=lambda item: item[1])[0])\n #print(lda.print_topic(max(lda_vector, key=lambda item: item[1])[0])) # prints the most prominent LDA topic",
"def analyseArticleSentiment(self, for_topics):\n\n if (for_topics):\n model = self.topic_model\n else:\n model = self.party_model\n\n # Store the original text, for use later\n original_text = self.article_text \n\n # Next, find overall most likely topics\n text_vectorized = self.getVectorised(self.article_text)\n topic_binary_predictions = model.predict(text_vectorized)\n\n likely_topics = np.nonzero(topic_binary_predictions == True)[1]\n\n # Create dictionary, key: topic index, value: [sentiment scores, counter (for averaging)]\n topic_sentiment_scores = {}\n \n # Then, split the original text into paragraphs and find the most likely topics\n paragraphs = original_text.split(\"\\n\")\n\n # Only consider a paragraph if it has five or more sentences\n # If it doesn't, collate paragraphs into bigger paragraphs\n composite_paragraph = \"\"\n\n for paragraph in paragraphs:\n\n original_paragraph = paragraph\n\n if composite_paragraph != \"\":\n paragraph = composite_paragraph + paragraph\n \n sentences = sent_tokenize(paragraph)\n\n if (len(sentences) < 5):\n composite_paragraph += original_paragraph + \"\\n\"\n continue\n else:\n composite_paragraph = \"\"\n \n # Vectorize the paragraph, and make topic/party predictions\n paragraph_vectorized = self.getVectorised(paragraph) \n paragraph_binary_predictions = model.predict(paragraph_vectorized)\n paragraph_probabilities = model.predict_proba(paragraph_vectorized)[0][0]\n\n likely_paragraph_topics = np.nonzero(paragraph_binary_predictions == True)[1]\n paragraph_probabilities = dict([(paragraph_index, round(paragraph_probabilities[paragraph_index], 1)) for paragraph_index in range(0, len(paragraph_probabilities)) if paragraph_index in likely_paragraph_topics])\n\n paragraph_sentiment_scores = {}\n\n for topic in likely_paragraph_topics:\n if (topic not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[topic] = 0\n\n # Next, get sentiment of each sentence\n for sentence in sentences:\n # Get the polarity of the sentence\n sentence_polarity = TextBlob(sentence).sentiment.polarity\n\n # If the sentence is likely talking about a topic found in the current paragraph, store weighted sentiment\n for topic_num in likely_paragraph_topics:\n # Get the probability of it being that topic\n paragraph_topic_weighting = paragraph_probabilities[topic_num]\n\n # Weight the polarity by the likelihood of the topic\n weighted_polarity = sentence_polarity * paragraph_topic_weighting\n paragraph_sentiment_scores[topic_num] += weighted_polarity\n\n # Following code deals with party entities (i.e. 
MPs), so skip if dealing with topic sentiment\n if (not for_topics):\n\n # Change to lower-case and strip accents\n preprocessed_sentence = self.preprocessor.changeToLower(sentence)\n preprocessed_sentence = self.preprocessor.stripAccents(sentence)\n\n # Check the entity tracker first, if we've already seen an MP previously\n for full_name, name_split in self.entity_tracker.items():\n search_forename = re.search(rf\".*{name_split[0]}.*\", preprocessed_sentence, re.IGNORECASE)\n search_surname = re.search(rf\".*{name_split[1]}.*\", preprocessed_sentence, re.IGNORECASE)\n search_full = re.search(rf\".*{full_name}.*\", preprocessed_sentence, re.IGNORECASE)\n\n if ((search_forename or search_surname) and not search_full): # If either parts of the name appear (but not together)\n party_num = name_split[2]\n party_num = int(party_num)\n if (party_num not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[party_num] = 0\n paragraph_sentiment_scores[party_num]+= sentence_polarity\n\n # If the sentence contains an MP from a political party, get sentiment \n for mp_name, party_num in self.mps.items():\n party_num = int(party_num)\n search = re.search(rf\".*{mp_name}.*\", preprocessed_sentence, re.IGNORECASE)\n if (search):\n if (party_num not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[party_num] = 0\n paragraph_sentiment_scores[party_num] += sentence_polarity\n\n # Separate first and last name for advanced entity searching in future sentences in paragraph\n if (mp_name not in self.entity_tracker):\n self.entity_tracker[mp_name] = [mp_name.split(\" \")[0], mp_name.split(\" \")[1], party_num]\n\n for topic, score in paragraph_sentiment_scores.items():\n if (topic not in topic_sentiment_scores):\n topic_sentiment_scores[topic] = [0,0]\n \n topic_sentiment_scores[topic][0] += score\n topic_sentiment_scores[topic][1] += 1\n\n # Returned object, key: topic index, value: score\n articleTopicSentimentsMatrix = {}\n\n # Once the text has been fully analysed, average the sentiment scores\n for topic_index, score_and_counter in topic_sentiment_scores.items():\n sentiment_score = score_and_counter[0] / score_and_counter[1]\n if (topic_index != 0):\n if (sentiment_score < -1):\n sentiment_score = -1\n elif (sentiment_score > 1):\n sentiment_score = 1\n articleTopicSentimentsMatrix[topic_index] = sentiment_score\n\n # Return list of pairs of topic/party and overall sentiment score (for article)\n return (likely_topics, articleTopicSentimentsMatrix)",
"def predict_topics(df):\n stopwords = (nltk.corpus.stopwords.words('english')\n + ['book', 'read'])\n dictionary = gensim.corpora.Dictionary.load('dictionary.gensim')\n lda_model = gensim.models.LdaMulticore.load('good_lda_model.gensim')\n\n sid = SentimentIntensityAnalyzer()\n lemmatizer = WordNetLemmatizer()\n\n def make_nvec(v, n=10):\n if len(v) == n:\n return [vv[1] for vv in v]\n result = [0.] * n\n for vv in v:\n result[vv[0]] = vv[1]\n return result\n\n sentiments = []\n nsentiments = []\n vectors = []\n for review in df['reviews']:\n sentences = []\n for sentence in review.split('.'):\n sentences.extend(sentence.split('||'))\n\n clean_sentences = [remove_proper_nouns(sentence)[0] for sentence in sentences]\n clean_sentences = [[lemmatizer.lemmatize(t) for t in nltk.word_tokenize(sentence)]\n for sentence in clean_sentences]\n clean_sentences = [[word for word in sentence if word not in stopwords and len(word) > 3]\n for sentence in clean_sentences]\n clean_sentences = [dictionary.doc2bow(sentence) for sentence in clean_sentences]\n sentence_vectors = [make_nvec(lda_model[sentence]) for sentence in clean_sentences]\n vectors.append(np.mean(sentence_vectors, axis=0))\n\n sentiment = np.zeros(10)\n nsentiment = np.zeros(10)\n for sentence, vector in zip(sentences, sentence_vectors):\n score = sid.polarity_scores(sentence)\n sentiment[np.argmax(vector)] += score['compound']\n nsentiment[np.argmax(vector)] += 1\n sentiments.append(sentiment / nsentiment)\n nsentiments.append(nsentiment)\n sentiments = np.array(sentiments)\n sentiments[np.isnan(sentiments)] = 0 # division by zero\n for i in range(10):\n df['review_embed{}'.format(i)] = sentiments[:, i]\n return df",
"def predict( classifier, new_msg ): # npa: can we know the \"certainty\" that this is correct?\n return classifier.classify( msg_features( new_msg, spam_words, ham_words ) )",
"def predict(self, texts, return_proba=False):\n\n is_array, is_pair = detect_text_format(texts)\n if not is_array: texts = [texts]\n\n classification, multilabel = U.is_classifier(self.model)\n\n # get predictions\n if U.is_huggingface(model=self.model):\n tseq = self.preproc.preprocess_test(texts, verbose=0)\n tseq.batch_size = self.batch_size\n tfd = tseq.to_tfdataset(train=False)\n preds = self.model.predict(tfd)\n if hasattr(preds, 'logits'): # dep_fix: breaking change - also needed for LongFormer\n #if type(preds).__name__ == 'TFSequenceClassifierOutput': # dep_fix: undocumented breaking change in transformers==4.0.0\n # REFERENCE: https://discuss.huggingface.co/t/new-model-output-types/195\n preds = preds.logits\n \n # dep_fix: transformers in TF 2.2.0 returns a tuple insead of NumPy array for some reason\n if isinstance(preds, tuple) and len(preds) == 1: preds = preds[0] \n else:\n texts = self.preproc.preprocess(texts)\n preds = self.model.predict(texts, batch_size=self.batch_size)\n\n # process predictions\n if U.is_huggingface(model=self.model):\n # convert logits to probabilities for Hugging Face models\n if multilabel and self.c:\n preds = activations.sigmoid(tf.convert_to_tensor(preds)).numpy()\n elif self.c:\n preds = activations.softmax(tf.convert_to_tensor(preds)).numpy()\n else:\n preds = np.squeeze(preds)\n if len(preds.shape) == 0: preds = np.expand_dims(preds, -1)\n result = preds if return_proba or multilabel or not self.c else [self.c[np.argmax(pred)] for pred in preds] \n if multilabel and not return_proba:\n result = [list(zip(self.c, r)) for r in result]\n if not is_array: return result[0]\n else: return result",
"def predict(self, x):\n predictionList=[]\n if self._model.loaded:\n for xValue in x:\n systemLabel=self._model.infer_topic(xValue)\n result=self._model.topicLabelling[systemLabel]\n predictionList.append(int(result))\n else:\n self._util.logError('TopicClusteringPredictionModel','Model needs to be loaded before prediction')\n\n return predictionList",
"def predict_paragraph(self, paragraph):\n # TODO: break paragraph into sentences.\n \n pass",
"def classify(texts: List[str], params: Any) -> List[str]:\n\n # ############################ REPLACE THIS WITH YOUR CODE #############################\n best_model, doc2vec, datasets_info = params\n\n X_test_start, X_test_len = datasets_info[texts[0]]\n X_test = doc2vec.get_X(X_test_start, X_test_len)\n\n preds_int = best_model.predict(X_test)\n preds = ['pos' if pr == 1 else 'neg' for pr in preds_int]\n\n return preds\n # ############################ REPLACE THIS WITH YOUR CODE #############################",
"def predict(slug, text):\n predictor = load_model(slug)\n return predictor.predict(text)",
"def prioritized_texts_with_label():\n print(\"getting prioritized texts\")\n n_texts = int(request.args.get('n'))\n texts = get_texts_only(n_all_samples)\n prios_texts = classifier.prioritize(map(lambda t: t['statement'], texts))\n # print(prios_texts)\n texts_prioritized = np.array(texts)[np.array(prios_texts)].tolist()\n to_label = texts_prioritized[:int(n_texts/2)]\n for sample in to_label:\n sample['label'] = \"\"\n\n texts_with_labels = random.sample([sample for sample in\n get_texts_with_labels(100, \"majority\")\n if sample['label'] in ['left', 'right']], int(n_texts/2))\n\n # print(texts_priotized)\n result = to_label + texts_with_labels\n random.shuffle(result)\n\n return jsonify({'data': result})",
"def predict(self, text):\n emotion_fields = [\n 'anger',\n 'anticipation',\n 'disgust',\n 'fear',\n 'joy',\n 'sadness',\n 'surprise',\n 'trust',\n ]\n sentiment_fields = [\n 'negative',\n 'positive'\n ]\n count = Counter()\n for token in word_tokenize(text.lower()):\n if token in self.model:\n count += Counter(self.model[token])\n # get % per emotion\n emotion_score = {}\n for key in emotion_fields:\n emotion_score[key] = count[key]\n emotion_perc = {}\n for key in emotion_fields:\n emotion_perc[key] = self.calculate_perc(count[key], sum(emotion_score.values()))\n # get % per sentiment\n sent_score = {}\n for key in sentiment_fields:\n sent_score[key] = count[key]\n sent_perc = {}\n for key in sentiment_fields:\n sent_perc[key] = self.calculate_perc(count[key], sum(sent_score.values()))\n return {\n 'emotion_cnt': emotion_score,\n 'emotion': emotion_perc,\n 'sentiment_cnt': sent_score,\n 'sentiment': sent_perc\n }",
"def prioritized_texts():\n print(\"getting prioritized texts\")\n n_texts = int(request.args.get('n'))\n texts = get_texts_only(n_all_samples)\n prios_texts = classifier.prioritize(map(lambda t: t['statement'], texts))\n # print(prios_texts)\n texts_priotized = np.array(texts)[np.array(prios_texts)].tolist()\n # print(texts_priotized)\n return jsonify({'data': texts_priotized[:n_texts]})",
"def predict_from_text(new_text, folder_save):\n print(folder_save)\n\n # Load transform and weights from training phase\n countvectorfile = os.path.join(folder_save, 'countvector.sav')\n cv = pickle.load(open(countvectorfile, 'rb'))\n\n Tfidfile = os.path.join(folder_save, 'Tfidfile.sav')\n tf = pickle.load(open(Tfidfile, 'rb'))\n\n bayesfile = os.path.join(folder_save, 'bayes_weights.sav')\n clf_bayes = pickle.load(open(bayesfile, 'rb'))\n\n logisticfile = os.path.join(folder_save, 'logistic_weights.sav')\n clf_logistic = pickle.load(open(logisticfile, 'rb'))\n\n # Apply on new text\n Text_clean = clean_my_new_text(new_text)\n new_corpus_vec = cv.transform([Text_clean])\n new_transform_vec = tf.transform(new_corpus_vec)\n X_test = new_transform_vec.todense()\n\n # Print the results\n print(\n f'According to Logreg, this poetry was song by {clf_logistic.predict(X_test)[0].upper()}')\n\n print(\"The probabilities are : \\n\")\n print(f'{clf_logistic.classes_} \\n')\n print(f'{clf_logistic.predict_proba(X_test)} \\n')\n\n print(\n f'According to Bayes, this poetry was song by {clf_bayes.predict(X_test)[0].upper()}')\n\n print(\"The probabilities are : \\n\")\n print(f'{clf_bayes.classes_} \\n')\n print(f'{clf_bayes.predict_proba(X_test)} \\n')\n\n return",
"def pred_lang(text, model):\n \n return model.predict(text)[0][0].replace('__label__', '')",
"def predict_proba(self, text):\n probabilities = detect_langs(text)\n converted = []\n for el in probabilities:\n converted.append({'lang': self.map2wili(el.lang),\n 'prob': el.prob})\n return converted",
"def predict_label(texts, labels, text_new):\r\n # YOUR CODE HERE\r\n\r\n # texts = ['RT @GOPLeader', 'RT @GOPLeader', 'Colorless green ideas sleep furiously.']\r\n # labels = ['rep', 'rep', 'dem']\r\n\r\n train_twitter = texts\r\n test_twitter = text_new\r\n\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n from sklearn.feature_extraction.text import TfidfTransformer\r\n from sklearn.naive_bayes import MultinomialNB\r\n\r\n count_vect = CountVectorizer()\r\n twitter_train_counts = count_vect.fit_transform(train_twitter)\r\n\r\n tf_transformer = TfidfTransformer(use_idf=False).fit(twitter_train_counts)\r\n twitter_train_tf = tf_transformer.transform(twitter_train_counts)\r\n\r\n tfidf_transformer = TfidfTransformer()\r\n twitter_train_tfidf = tfidf_transformer.fit_transform(twitter_train_counts)\r\n\r\n twitter_clf = MultinomialNB().fit(twitter_train_tfidf,labels )\r\n\r\n # transforming the test data\r\n\r\n twitter_test_data = count_vect.transform(test_twitter)\r\n twitter_tfidf = tfidf_transformer.transform(twitter_test_data)\r\n\r\n #prediction\r\n twitter_predicted = twitter_clf.predict(twitter_tfidf)\r\n\r\n for text, class_label in zip(test_twitter, twitter_predicted):\r\n print('%r => %s' % (text, class_label))\r\n\r\n\r\n return list(twitter_predicted)",
"def infer_labels(text):\n\n if model is None or tokenizer is None:\n load_distilroberta()\n\n inputs, labels = process_input(text)\n\n return compute_labels(inputs, labels)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple method to compute distances from points in v1 to points in v2.
|
def distances(self, v1, v2):
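# Expand both point sets to shape (len(v1), len(v2), dim) so a single subtraction yields
# all pairwise differences; the small epsilon keeps the sqrt numerically stable at zero distance.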
v1_2 = v1.unsqueeze(1).expand(v1.size(0), v2.size(0), v1.size(1))
v2_2 = v2.unsqueeze(0).expand(v1.size(0), v2.size(0), v1.size(1))
return torch.sqrt(torch.pow(v2_2 - v1_2, 2).sum(2) + 0.000000001)
|
[
"def vec_dist(v1, v2):\n dist = 0\n j = 0\n for i in range(len(v1)):\n while j<len(v2) and v1[i][0]>v2[j][0]:\n dist = dist + v2[j][1]**2\n j = j + 1\n p = v1[i][1]**2 if j>=len(v2) or v2[j][0]>v1[i][0] \\\n else (v2[j][1]-v1[i][1])**2\n dist = dist + p\n if len(v1)==0:\n dist = sum(v[1]**2 for v in v2)\n return dist",
"def minkowskiDist(v1, v2, p=2):\n dist = 0.0\n for i in range(len(v1)):\n dist += abs(v1[i] - v2[i])**p\n return dist**(1.0/p)",
"def distance(self, v2):\n if len(self.coordinates) == len(v2.coordinates):\n result = 0\n summe = 0\n for i in range(0, len(v2.coordinates)):\n summe = summe + (self.coordinates[i] - v2.coordinates[i]) ** 2\n result = math.sqrt(summe)\n return result\n else:\n raise ValueError(\"Arrays not of the same dimension!\")",
"def distanceV(vector1, vector2):\n\treturn vector1[1] - vector2[1]",
"def compute_distance(cls, point_1, point_2):\n return abs(point_1 - point_2)",
"def distance(point1: list | np.ndarray,\n point2: list | np.ndarray) -> float:\n v1, v2 = pad_with_zeros(point1, point2)\n return np.linalg.norm(v2 - v1)",
"def distance(point1, point2):\n return point1.dist_to(point2)",
"def distance_between_points(p1, p2):\n import math\n import math.sqrt((p2.x-p1.x)**2 + (p2.y - p1.y)**2)",
"def distance(p1: sdl2.SDL_Point, p2: sdl2.SDL_Point) -> float:\n\n distances = xy_distances(p1, p2)\n return math.sqrt(distances.x**2 + distances.y**2)",
"def dist(loc1, loc2):\n return math.sqrt((loc1[0]-loc2[0])**2 + (loc1[1]-loc2[1])**2)",
"def distance(self, u, v):\n # TODO: Implement the distance function between vectors u and v]\n # Note: you can also think of this as computing a similarity measure\n\n pass",
"def calculateSimilarity(self, v1, v2):\n pass",
"def distance(waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist",
"def euclidean_distance(list_1, list_2):\n\n # Guard against empty lists.\n if len(v) is 0:\n return 0\n\n # Note that this is the same as vector subtraction.\n differences = [list_1[idx] - list_2[idx] for idx in range(len(v))]\n squares = [diff ** 2 for diff in differences]\n sum_of_squares = sum(squares)\n\n return 1 / (1 + ((sum_of_squares) ** 0.05))",
"def minkowski_distance(point1, point2):\n p=3\n p_sum = 0\n for i in range(0,len(point1)):\n p_sum+=(abs(point1[i]-point2[i]))**p\n return p_sum**(1. /p)\n raise NotImplementedError",
"def basic(r1, r2):\n temp = 0\n n = 0\n for key in r1:\n if key in r2:\n temp += abs(r1[key] - r2[key])\n n += 1\n if n != 0:\n distance = float(temp)/float(n)\n else:\n distance = sys.float_info.max\n return distance",
"def distance_point_point(point1, point2=(0., 0.)):\n dx = point1[0] - point2[0]\n dy = point1[1] - point2[1]\n return sqrt(dx*dx + dy*dy)",
"def point_distance():\n\n\n #create point1 and point2 instances\n point1 = Point()\n point2 = Point()\n\n # assign values to point objects\n point1.x = 15\n point1.y = 15\n\n point2.x = 5\n point2.y = 5\n\n #compute distance\n distance = sqrt((point2.y - point1.y)**2 + (point2.x - point1.x)**2)\n return distance",
"def compute_feature_distances(features1, features2):\n\n ###########################################################################\n # TODO: YOUR CODE HERE #\n ###########################################################################\n blank = np.zeros((len(features1), len(features2)))\n for i in range(len(features1)):\n for j in range(len(features2)):\n d1 = features1[i]\n d2 = features2[j]\n dist = ((d1[0]-d2[0]) ** 2 + (d1[1] - d2[1]) ** 2) ** (1/2) \n blank[i,j] = dist\n dists = blank\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dists"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
result[0], label, and weight are lists of length gpus = batch_size. segmentation has as many elements as UResNet returns. label[0] has shape (N, 1), where N is the number of points across the minibatch_size events.
|
def forward(self, result, label, cluster_label):
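# Combines three objectives, each averaged over the events in the batch: a cross-entropy
# semantic-segmentation loss on the UResNet output, a discriminative clustering loss
# (intra-cluster, inter-cluster, regularization and real-distance terms) computed per
# feature map and per semantic class, and an optional density-estimation loss.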
assert len(result['segmentation']) == len(label)
batch_ids = [d[0][:, -2] for d in label]
uresnet_loss, uresnet_acc = 0., 0.
cluster_intracluster_loss = 0.
cluster_intercluster_loss = 0.
cluster_reg_loss = 0.
cluster_real_distance_loss = 0.
cluster_total_loss = 0.
cluster_intracluster_loss_per_class = [0.] * self._num_classes
cluster_intercluster_loss_per_class = [0.] * self._num_classes
cluster_reg_loss_per_class = [0.] * self._num_classes
cluster_real_distance_loss_per_class = [0.] * self._num_classes
cluster_total_loss_per_class = [0.] * self._num_classes
density_loss = 0.
density_lossA_estimate, density_lossA_target = 0., 0.
density_lossB_estimate, density_lossB_target = 0., 0.
density_accA = [0.] * len(self._radius)
density_accB = [0.] * len(self._radius)
accuracy = []
for j in range(self._depth):
accuracy.append([0.] * self._num_classes)
for i in range(len(label)):
max_depth = len(cluster_label[i])
# for j, feature_map in enumerate(segmentation[3][i]):
# hypercoordinates = feature_map.features
# self.distances2(hypercoordinates)
for b in batch_ids[i].unique():
batch_index = batch_ids[i] == b
event_segmentation = result['segmentation'][i][batch_index] # (N, num_classes)
event_label = label[i][0][batch_index][:, -1][:, None] # (N, 1)
event_label = torch.squeeze(event_label, dim=-1).long()
# Reorder event_segmentation to match event_label
data_coords = result['cluster_feature'][i][-1].get_spatial_locations()[batch_index][:, :-1]
perm = np.lexsort((data_coords[:, 2], data_coords[:, 1], data_coords[:, 0]))
event_segmentation = event_segmentation[perm]
# Loss for semantic segmentation
loss_seg = self.cross_entropy(event_segmentation, event_label)
uresnet_loss += torch.mean(loss_seg)
# Accuracy for semantic segmentation
predicted_labels = torch.argmax(event_segmentation, dim=-1)
acc = predicted_labels.eq_(event_label).sum().item() / float(predicted_labels.nelement())
uresnet_acc += acc
# Loss for clustering
# Loop first over feature maps, starting from coarsest one
# Note: We have to be careful with coordinates sorting.
# TODO
# - density estimation for points within 0.1, 0.3, 0.5, outside of 0.5 of cluster center
for j, feature_map in enumerate(result['cluster_feature'][i]):
if torch.cuda.is_available():
batch_index = feature_map.get_spatial_locations()[:, -1].cuda() == b.long()
else:
batch_index = feature_map.get_spatial_locations()[:, -1] == b.long()
hypercoordinates = feature_map.features[batch_index]
coordinates = feature_map.get_spatial_locations()[batch_index][:, :-1]
clusters = cluster_label[i][-(j+1+(max_depth-self._depth))][cluster_label[i][-(j+1+(max_depth-self._depth))][:, -2] == b]
# clusters_coordinates = clusters[:, :self._dimension]
clusters_labels = clusters[:, -1:]
semantic_labels = label[i][-(j+1+(max_depth-self._depth))][label[i][-(j+1+(max_depth-self._depth))][:, -2] == b]
# Sort coordinates in lexicographic order
x = coordinates.cpu().detach().numpy()
perm = np.lexsort((x[:, 2], x[:, 1], x[:, 0]))
coordinates = coordinates[perm]
hypercoordinates = hypercoordinates[perm]
# Density estimate loss
if self._density_estimate and j > 0:
density_estimate = result['density_feature'][i][j-1][batch_index][perm]
clusters_id = clusters_labels.unique()
lossA_estimate, lossA_target, lossB_estimate, lossB_target = 0., 0., 0., 0.
total_densityA = [0.] * len(self._radius)
total_densityB = [0.] * len(self._radius)
distances = self.distances2(hypercoordinates[None,...][..., :3]).squeeze(0)
for c in clusters_id:
cluster_idx = (clusters_labels == c).squeeze(1)
cluster = hypercoordinates[cluster_idx]
estimate = density_estimate[cluster_idx]
for k, r in enumerate(self._radius):
# d = (self.distances(cluster, hypercoordinates) < r)
d = (distances[cluster_idx, :] < r)
# d = self.radius(cluster, hypercoordinates, r)
densityA = d[:, cluster_idx].sum(dim=1)
densityB = d[:, ~cluster_idx].sum(dim=1)
# total = (densityA + densityB).float()
densityA = densityA.float() #/ total
densityB = densityB.float() #/ total
total_densityA[k] += densityA.mean()
total_densityB[k] += densityB.mean()
lossA_estimate += torch.pow(estimate[:, 0] - densityA, 2).mean()
lossB_estimate += torch.pow(estimate[:, 1] - densityB, 2).mean()
lossA_target += torch.pow(torch.clamp(self._target_densityA[k] - densityA, min=0), 2).mean()
lossB_target += torch.pow(torch.clamp(densityB - self._target_densityB[k], min=0), 2).mean()
# print("densityA", j, c, densityA.mean())
#print(torch.clamp(self._target_densityA - densityA, min=0))
#print("densityB", densityB)
#print(torch.clamp(densityB - self._target_densityB, min=0))
lossA_estimate /= clusters_id.size(0) * len(self._radius)
lossA_target /= clusters_id.size(0) * len(self._radius)
lossB_estimate /= clusters_id.size(0) * len(self._radius)
lossB_target /= clusters_id.size(0) * len(self._radius)
density_loss += self._density_weightA * (lossA_estimate + lossA_target) + self._density_weightB * (lossB_estimate + lossB_target)
density_lossA_estimate += lossA_estimate
density_lossB_estimate += lossB_estimate
density_lossA_target += lossA_target
density_lossB_target += lossB_target
for k in range(len(self._radius)):
total_densityA[k] /= clusters_id.size(0) * len(self._radius)
density_accA[k] += total_densityA[k]
total_densityB[k] /= clusters_id.size(0) * len(self._radius)
density_accB[k] += total_densityB[k]
# print(density_lossA_estimate, density_lossA_target, density_lossB_estimate, density_lossB_target)
# Loop over semantic classes
for class_ in range(self._num_classes):
class_index = semantic_labels[:, -1] == class_
# 0. Identify label clusters
clusters_id = clusters_labels[class_index].unique()
hyperclusters = [] # Hypercoordinates for each true cluster
realclusters = [] # Real coordinates of centroids of each true cluster
for c in clusters_id:
cluster_idx = (clusters_labels[class_index] == c).squeeze(1)
hyperclusters.append(hypercoordinates[class_index][cluster_idx])
realclusters.append(coordinates[class_index][cluster_idx].float())
# 1. Loop over clusters, define intra-cluster loss
#
# Also define real cluster loss = mean distance in real
# coordinates from a point to the centroid of the
# predicted cluster. This should avoid clustering
# together points that are far away in the real life.
intra_cluster_loss = 0.
real_distance_loss = 0.
means = []
realmeans = []
zero = torch.tensor(0.)
if torch.cuda.is_available(): zero = zero.cuda()
C = len(hyperclusters)
if C > 0:
for x, cluster in enumerate(hyperclusters):
mean = cluster.mean(dim=0)
means.append(mean)
realmean = realclusters[x].mean(dim=0)
realmeans.append(realmean)
# intra_cluster_loss += torch.max(((mean - cluster).pow(2).sum(dim=1) + 0.000000001).sqrt() - self._intra_cluster_margin, zero).pow(2).mean()
intra_cluster_loss += torch.mean(torch.pow(torch.clamp(torch.norm(cluster-mean, dim=1)- self._intra_cluster_margin[j], min=0), 2))
intra_cluster_loss /= C
means = torch.stack(means)
realmeans = torch.stack(realmeans)
# Now compute real cluster loss
for x, cluster in enumerate(hyperclusters):
# Assign each point to a predicted centroid
predicted_assignments = torch.argmin(self.distances(cluster, means), dim=1)
# Distance to this centroid in real space
real_distance_loss += torch.mean(torch.pow(torch.norm(realmeans[predicted_assignments] - realclusters[x], dim=1), 2))
real_distance_loss /= C
# compute accuracy based on this heuristic cluster
# prediction assignments
predicted_assignments = torch.argmin(self.distances(hypercoordinates[class_index], means), dim=1)
predicted_assignments = clusters_id[predicted_assignments]
accuracy[j][class_] += predicted_assignments.eq_(clusters_labels[class_index].squeeze(1)).sum().item() / float(predicted_assignments.nelement())
# 2. Define inter-cluster loss
inter_cluster_loss = 0.
if C > 1:
d = torch.max(2 * self._inter_cluster_margin[j] - self.distances(means, means), zero).pow(2)
inter_cluster_loss = d[np.triu_indices(d.size(1), k=1)].sum() * 2.0 / (C * (C-1))
# 3. Add regularization term
reg_loss = 0.
if C > 0:
# reg_loss = (means.pow(2).sum(dim=1) + 0.000000001).sqrt().mean()
reg_loss = torch.norm(means, dim=1).mean()
# Compute final loss
total_loss = self._alpha * intra_cluster_loss + self._beta * inter_cluster_loss + self._gamma * reg_loss + self._delta * real_distance_loss
cluster_intracluster_loss += self._alpha * intra_cluster_loss
cluster_intercluster_loss += self._beta * inter_cluster_loss
cluster_reg_loss += self._gamma * reg_loss
cluster_real_distance_loss += self._delta * real_distance_loss
cluster_total_loss += total_loss
cluster_intracluster_loss_per_class[class_] += self._alpha * intra_cluster_loss
cluster_intercluster_loss_per_class[class_] += self._beta * inter_cluster_loss
cluster_reg_loss_per_class[class_] += self._gamma * reg_loss
cluster_real_distance_loss_per_class[class_] += self._delta * real_distance_loss
cluster_total_loss_per_class[class_] += total_loss
# print(feature_map.features.shape, feature_map.spatial_size)
# print(j, class_, "Intra =", torch.tensor(self._alpha * intra_cluster_loss).item())
# print(j, class_, "Inter =", torch.tensor(self._beta * inter_cluster_loss).item())
# print(j, class_, "Reg = ", torch.tensor(self._gamma * reg_loss).item())
# print(j, class_, "Intra =", torch.tensor(self._alpha * intra_cluster_loss/float(self._num_classes)).item())
# print(j, class_, "Inter =", torch.tensor(self._beta * inter_cluster_loss/float(self._num_classes)).item())
# print(j, class_, "Reg = ", torch.tensor(self._gamma * reg_loss/float(self._num_classes)).item())
batch_size = len(batch_ids[i].unique())
# cluster_intracluster_loss /= self._num_classes
# cluster_intercluster_loss /= self._num_classes
# cluster_reg_loss /= self._num_classes
# cluster_total_loss /= self._num_classes
# cluster_intracluster_loss_per_class = [x/batch_size for x in cluster_intracluster_loss_per_class]
# cluster_intercluster_loss_per_class = [x/batch_size for x in cluster_intercluster_loss_per_class]
# cluster_reg_loss_per_class = [x/batch_size for x in cluster_reg_loss_per_class]
# cluster_total_loss_per_class = [x/batch_size for x in cluster_total_loss_per_class]
# print("Intra = ", cluster_intracluster_loss.item())
# print("Inter = ", cluster_intercluster_loss.item())
# print("Reg = ", cluster_reg_loss.item())
results = {
'accuracy': uresnet_acc / float(batch_size),
'loss': (self._uresnet_weight * uresnet_loss + cluster_total_loss + self._density_weight * density_loss) / float(batch_size),
'uresnet_loss': self._uresnet_weight * uresnet_loss / float(batch_size),
'uresnet_acc': uresnet_acc / float(batch_size),
'intracluster_loss': cluster_intracluster_loss / float(batch_size),
'intercluster_loss': cluster_intercluster_loss / float(batch_size),
'reg_loss': cluster_reg_loss / float(batch_size),
'real_distance_loss': cluster_real_distance_loss / float(batch_size),
'total_cluster_loss': cluster_total_loss / float(batch_size),
'density_loss': self._density_weight * density_loss / float(batch_size),
'density_lossA_estimate': self._density_weight * self._density_weightA * density_lossA_estimate / float(batch_size),
'density_lossB_estimate': self._density_weight * self._density_weightB * density_lossB_estimate / float(batch_size),
'density_lossA_target': self._density_weight * self._density_weightA * density_lossA_target / float(batch_size),
'density_lossB_target': self._density_weight * self._density_weightB * density_lossB_target / float(batch_size),
}
for i, r in enumerate(self._radius):
results['density_accA_%.2f' % r] = density_accA[i] / float(batch_size)
results['density_accB_%.2f' % r] = density_accB[i] / float(batch_size)
for class_ in range(self._num_classes):
results['intracluster_loss_%d' % class_] = cluster_intracluster_loss_per_class[class_] / float(batch_size)
results['intercluster_loss_%d' % class_] = cluster_intercluster_loss_per_class[class_] / float(batch_size)
results['reg_loss_%d' % class_] = cluster_reg_loss_per_class[class_] / float(batch_size)
results['real_distance_loss_%d' % class_] = cluster_real_distance_loss_per_class[class_] / float(batch_size)
results['total_cluster_loss_%d' % class_] = cluster_total_loss_per_class[class_] / float(batch_size)
for j in range(self._depth):
results['acc_%d_%d' % (j, class_)] = accuracy[j][class_] / float(batch_size)
return results
|
[
"def training_pool(self):",
"def train_conv_net(datasets,\n U,\n word_idx_map,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=11, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True,\n pi_params=[1.,0],\n C=1.0,\n patience=20): \n rng = np.random.RandomState(3435)\n # 其实为句子的长度sent_len\n img_h = len(datasets[0][0])-1\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs:\n filter_shapes.append((feature_maps, 1, filter_h, filter_w))\n # 在img_h×img_w大小的图片上进行s=1,f=f_h×f_w的卷积操作时,\n # 所得的卷积结果图大小为(img_h-f_h+1)×(img_w-f_w+1)\n # 然后经过大小为(img_h-f_h+1)×(img_w-f_w+1)的池化层后,就只剩下一个“点”了\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1))\n # [('image shape', 61, 300), ('filter shape', [(100, 1, 3, 300), (100, 1, 4, 300), (100, 1, 5, 300)]),\n # ('hidden_units', [100, 2]), ('dropout', [0.4]), ('batch_size', 50), ('non_static', True),\n # ('learn_decay', 0.95), ('conv_non_linear', 'relu'), ('non_static', True), ('sqr_norm_lim', 9),\n # ('shuffle_batch', True), ('pi_params', [0.95, 0]), ('C', 6.0)]\n parameters = [(\"image shape\",img_h,img_w), (\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\",dropout_rate), (\"batch_size\",batch_size), (\"non_static\",non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\",conv_non_linear), (\"non_static\",non_static),\n (\"sqr_norm_lim\",sqr_norm_lim), (\"shuffle_batch\",shuffle_batch), (\"pi_params\",pi_params),\n (\"C\",C)]\n print(parameters) \n \n #define model architecture\n index = T.lscalar()\n # shape=([sent_sum|batch_size], [sent_len|img_h]): 即共有sent_sum句话,每句由sent_len个单词的id组成\n x = T.matrix('x')\n # shape=(sent_sum, 1) \n y = T.ivector('y')\n # shape=(vocal_size, word_size)\n Words = theano.shared(value = U, name = \"Words\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(img_w)\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))], allow_input_downcast=True)\n # x.flatten(): 将 x 按行展开\n # shape=(sent_sum,1,sent_len,word_size)\n # 对应于图像,其意思即为:共有sent_sum张图像,每张图像的通道为1且大小为sent_len×word_size\n layer0_input = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0], 1, x.shape[1], Words.shape[1]))\n conv_layers = []\n layer1_inputs = []\n # 第1层输入有filter_hs种卷积核\n for i in xrange(len(filter_hs)):\n # value=[filter_sum,filter_layer,filter_h,filter_w]\n # 即共有filter_sum个卷积核,每个卷积核的大小为word_h×word_w且层数/通道为filter_layer\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n # image_shape is actually the shape of input\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input, image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n # flatten(axis):axis>0, 即将tensor从axis维度开始的所有维度进行“坍缩”,具体如下\n # conv_layer.output: shape=(sent_sum,filter_sum)\n # layer1_input: shape=(sent_sum,filter_sum)\n layer1_input = conv_layer.output.flatten(2)\n conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # shape=(sent_sum, filter_sum*len(filter_hs)=300)\n layer1_input = T.concatenate(layer1_inputs, 1)\n hidden_units[0] = feature_maps*len(filter_hs)\n # 实际上,这里的CNN仅有两层:input-conv(-max_pool)-output\n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n # build the feature of BUT-rule\n # shape=([sent_sum|batch_size], [sent_len|img_h]): 
即共有sent_sum句话,每句由sent_len个单词的id组成\n f_but = T.fmatrix('f_but')\n # shape=(batch_size,1)\n f_but_ind = T.fmatrix('f_ind') # indicators\n f_but_layer0_input = Words[T.cast(f_but.flatten(),dtype=\"int32\")].reshape((f_but.shape[0],1,f_but.shape[1],Words.shape[1]))\n f_but_pred_layers = []\n for conv_layer in conv_layers:\n # shape=(batch_size, filter_sum=filter_shape[0], 1, 1)\n # after flatten: shape=(batch_size, filter_sum)\n f_but_layer0_output = conv_layer.predict(f_but_layer0_input, batch_size)\n f_but_pred_layers.append(f_but_layer0_output.flatten(2))\n # shape=(batch_size, filter_sum*len(filter_hs)=300)\n f_but_layer1_input = T.concatenate(f_but_pred_layers, 1)\n # shape=(batch_size, class=2)\n f_but_y_pred_p = classifier.predict_p(f_but_layer1_input)\n # shape=(batch_size, label+class=1+2=3)\n f_but_full = T.concatenate([f_but_ind,f_but_y_pred_p], axis=1) # batch_size x 1 + batch_size x K\n f_but_full = theano.gradient.disconnected_grad(f_but_full)\n\n # add logic layer\n nclasses = 2\n rules = [FOL_But(nclasses, x, f_but_full)]\n rule_lambda = [1]\n new_pi = get_pi(cur_iter=0, params=pi_params)\n logic_nn = LogicNN(rng, input=x, network=classifier, rules=rules, rule_lambda=rule_lambda, pi=new_pi, C=C)\n \n # define parameters of the model and update functions using adadelta\n # list\n params_p = logic_nn.params_p\n for conv_layer in conv_layers:\n # append list\n params_p += conv_layer.params\n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params_p += [Words]\n # 公式 (2)——objective function\n cost_p = logic_nn.negative_log_likelihood(y) \n dropout_cost_p = logic_nn.dropout_negative_log_likelihood(y) \n grad_updates_p = sgd_updates_adadelta(params_p, dropout_cost_p, lr_decay, 1e-6, sqr_norm_lim)\n \n # shuffle dataset and assign to mini batches. 
if dataset size is not a multiple of mini batches, replicate\n # extra data (at random)\n np.random.seed(3435)\n # training data\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n # shuffle both train data and features\n permutation_order = np.random.permutation(datasets[0].shape[0])\n train_set = datasets[0][permutation_order]\n extra_data = train_set[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_fea = {}\n train_fea = datasets[3]\n for k in train_fea.keys():\n train_fea_k = train_fea[k][permutation_order]\n extra_fea = train_fea_k[:extra_data_num]\n new_fea[k] = np.append(train_fea[k],extra_fea,axis=0)\n train_text = datasets[6][permutation_order]\n extra_text = train_text[:extra_data_num]\n new_text=np.append(datasets[6],extra_text,axis=0)\n else:\n new_data = datasets[0]\n new_fea = datasets[3]\n new_text = datasets[6]\n # shuffle both training data and features\n permutation_order = np.random.permutation(new_data.shape[0])\n new_data = new_data[permutation_order]\n for k in new_fea.keys():\n new_fea[k] = new_fea[k][permutation_order]\n new_text = new_text[permutation_order]\n n_batches = new_data.shape[0] / batch_size\n n_train_batches = n_batches\n train_set = new_data\n train_set_x, train_set_y = shared_dataset((train_set[:,:img_h],train_set[:,-1]))\n train_fea = new_fea\n train_fea_but_ind = train_fea['but_ind'].reshape([train_fea['but_ind'].shape[0],1])\n train_fea_but_ind = shared_fea(train_fea_but_ind)\n for k in new_fea.keys():\n if k!='but_text':\n train_fea[k] = shared_fea(new_fea[k])\n\n # val data\n if datasets[1].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[1].shape[0] % batch_size\n # shuffle both val data and features\n permutation_order = np.random.permutation(datasets[1].shape[0])\n val_set = datasets[1][permutation_order]\n extra_data = val_set[:extra_data_num]\n new_val_data=np.append(datasets[1],extra_data,axis=0)\n new_val_fea = {}\n val_fea = datasets[4]\n for k in val_fea.keys():\n val_fea_k = val_fea[k][permutation_order]\n extra_fea = val_fea_k[:extra_data_num]\n new_val_fea[k] = np.append(val_fea[k],extra_fea,axis=0)\n val_text = datasets[7][permutation_order]\n extra_text = val_text[:extra_data_num]\n new_val_text = np.append(datasets[7],extra_text,axis=0)\n else:\n new_val_data = datasets[1]\n new_val_fea = datasets[4]\n new_val_text = datasets[7]\n val_set = new_val_data\n val_set_x, val_set_y = shared_dataset((val_set[:,:img_h],val_set[:,-1]))\n n_batches = new_val_data.shape[0] / batch_size\n n_val_batches = n_batches\n val_fea = new_val_fea\n val_fea_but_ind = val_fea['but_ind'].reshape([val_fea['but_ind'].shape[0],1])\n val_fea_but_ind = shared_fea(val_fea_but_ind)\n for k in val_fea.keys():\n if k!='but_text':\n val_fea[k] = shared_fea(val_fea[k])\n\n # test data\n test_set_x = datasets[2][:,:img_h] \n test_set_y = np.asarray(datasets[2][:,-1],\"int32\")\n test_fea = datasets[5]\n test_fea_but_ind = test_fea['but_ind']\n test_fea_but_ind = test_fea_but_ind.reshape([test_fea_but_ind.shape[0],1])\n test_text = datasets[8]\n\n ### compile theano functions to get train/val/test errors\n val_model = theano.function([index], logic_nn.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size],\n f_but: val_fea['but'][index * batch_size: (index + 1) * batch_size],\n f_but_ind: val_fea_but_ind[index * batch_size: (index + 1) * batch_size,:] },\n 
allow_input_downcast=True,\n on_unused_input='warn')\n \n test_model = theano.function([index], logic_nn.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n f_but: train_fea['but'][index * batch_size: (index + 1) * batch_size],\n f_but_ind: train_fea_but_ind[index * batch_size: (index + 1) * batch_size,:]},\n allow_input_downcast=True,\n on_unused_input='warn')\n\n train_model = theano.function([index], cost_p, updates=grad_updates_p,\n givens={\n x: train_set_x[index*batch_size:(index+1)*batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size],\n f_but: train_fea['but'][index*batch_size:(index+1)*batch_size],\n f_but_ind: train_fea_but_ind[index*batch_size:(index+1)*batch_size,:]},\n allow_input_downcast = True,\n on_unused_input='warn')\n\n ### setup testing\n test_size = test_set_x.shape[0]\n print('test size ', test_size) \n test_pred_layers = []\n test_layer0_input = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1]))\n f_but_test_pred_layers = []\n f_but_test_layer0_input = Words[T.cast(f_but.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1]))\n for conv_layer in conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n f_but_test_layer0_output = conv_layer.predict(f_but_test_layer0_input, test_size)\n f_but_test_pred_layers.append(f_but_test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n f_but_test_layer1_input = T.concatenate(f_but_test_pred_layers, 1)\n f_but_test_y_pred_p = classifier.predict_p(f_but_test_layer1_input)\n f_but_test_full = T.concatenate([f_but_ind,f_but_test_y_pred_p],axis=1) # Ns x 1 + Ns x K\n\n # transform to shared variables\n test_set_x_shr, test_set_y_shr = shared_dataset((test_set_x,test_set_y))\n\n test_q_y_pred, test_p_y_pred = logic_nn.predict(test_layer1_input,\n test_set_x_shr,\n [f_but_test_full])\n test_q_error = T.mean(T.neq(test_q_y_pred, y))\n test_p_error = T.mean(T.neq(test_p_y_pred, y))\n test_model_all = theano.function([x,y,f_but,f_but_ind],\n [test_q_error, test_p_error], allow_input_downcast = True,\n on_unused_input='warn')\n \n ### start training over mini-batches\n print('... 
training')\n epoch = 0\n batch = 0\n best_val_q_perf = 0\n val_p_perf = 0\n val_q_perf = 0\n cost_epoch = 0 \n stop_count = 0\n while (epoch < n_epochs):\n start_time = time.time()\n epoch = epoch + 1\n # train\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n batch = batch + 1\n new_pi = get_pi(cur_iter=batch*1./n_train_batches, params=pi_params)\n logic_nn.set_pi(new_pi)\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n batch = batch + 1\n new_pi = get_pi(cur_iter=batch*1./n_train_batches, params=pi_params)\n logic_nn.set_pi(new_pi)\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n # eval\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_losses = np.array(train_losses)\n train_q_perf = 1 - np.mean(train_losses[:,0])\n train_p_perf = 1 - np.mean(train_losses[:,1])\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_losses = np.array(val_losses)\n val_q_perf = 1 - np.mean(val_losses[:,0])\n val_p_perf = 1 - np.mean(val_losses[:,1])\n print('epoch: %i, training time: %.2f secs; (q): train perf: %.4f %%, val perf: %.4f %%; (p): train perf: %.4f %%, val perf: %.4f %%' % \\\n (epoch, time.time()-start_time, train_q_perf * 100., val_q_perf*100., train_p_perf * 100., val_p_perf*100.))\n test_loss = test_model_all(test_set_x,test_set_y,test_fea['but'],test_fea_but_ind)\n test_loss = np.array(test_loss)\n test_perf = 1 - test_loss\n print('test perf: q %.4f %%, p %.4f %%' % (test_perf[0]*100., test_perf[1]*100.))\n if val_q_perf > best_val_q_perf:\n best_val_q_perf = val_q_perf\n ret_test_perf = test_perf\n stop_count = 0\n else:\n stop_count += 1\n if stop_count == patience:\n break\n return ret_test_perf",
"def test_batch_gen (data_dir, values=list(range(0,19)), shuffle=True, relabel=True, trim=False,\n trim_ind=(0, 720), reshape=True, new_shape = (640, 360), preprocess=False, \n new_labels=False, denoise=False):\n\n if len(values) < 1:\n raise ValueError('values array is empty')\n \n def get_test_batch(batch_size=12):\n \"\"\"\n Generate batches of images and labels for testing \n \n :param batch_size: size of batch\n :return: images, labels, names\n \"\"\"\n \n global original_size\n image_paths = glob(os.path.join(data_dir, 'images', '*.jpg'))\n image = imageio.imread(image_paths[0])\n original_size = (image.shape[1], image.shape[0])\n \n if shuffle:\n random.shuffle(image_paths)\n for i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n names = []\n for path in image_paths[i:i+batch_size]:\n image_name = os.path.basename(path)\n names.append(image_name)\n label_name = image_name[:-4] + '_train_id.png'\n label_path = os.path.join(data_dir, 'labels', label_name)\n label = imageio.imread(label_path)\n image = imageio.imread(path)\n if relabel:\n relabel_vehicles(label)\n relabel_pedestrian(label)\n relabel_background(label)\n if new_labels:\n new_label_20(label)\n new_label_30(label)\n if trim:\n image = image[trim_ind[0]:trim_ind[1]]\n new_label = np.zeros((original_size[1], original_size[0]), dtype=np.uint8)\n new_label[trim_ind[0]:trim_ind[1]] = label[trim_ind[0]:trim_ind[1]]\n label = new_label\n if reshape:\n image = cv2.resize(image, new_shape)\n if preprocess:\n image = image_preprocessing(image, denoise=denoise)\n label = one_hot_label(label, values)\n images.append(image)\n labels.append(label)\n\n images = np.array(images, dtype=np.uint8)\n labels = np.array(labels, dtype=np.uint8)\n yield images, labels, names\n \n def revert_trim_reshape (preds):\n \"\"\"\n Batch generator maybe trim and resize images. This function is used to revert\n predicted argmax labels for comparison during evaluation.\n \n :param pred: batch of label prediction from network\n :return: predictions of original image size\n \"\"\"\n \n if original_size == None:\n raise ValueError('original_size has not been set')\n if len(preds.shape) != 3:\n raise ValueError('preds array must be 3D argmax (batch_size, height, width)')\n if trim == False and reshape == False:\n return preds\n new_preds = np.zeros((preds.shape[0], original_size[1], original_size[0]), dtype=np.uint8)\n for i, pred in enumerate(preds):\n if reshape and trim:\n pred = cv2.resize(pred, (original_size[0], trim_ind[1]-trim_ind[0]), interpolation=cv2.INTER_NEAREST)\n elif reshape:\n pred = cv2.resize(pred, original_size, interpolation=cv2.INTER_NEAREST)\n if trim:\n new_preds[i, trim_ind[0]:trim_ind[1]] = pred\n else:\n new_preds[i] = pred\n return new_preds\n \n return get_test_batch, revert_trim_reshape",
"def get_test_batch(batch_size=12):\n \n global original_size\n image_paths = glob(os.path.join(data_dir, 'images', '*.jpg'))\n image = imageio.imread(image_paths[0])\n original_size = (image.shape[1], image.shape[0])\n \n if shuffle:\n random.shuffle(image_paths)\n for i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n names = []\n for path in image_paths[i:i+batch_size]:\n image_name = os.path.basename(path)\n names.append(image_name)\n label_name = image_name[:-4] + '_train_id.png'\n label_path = os.path.join(data_dir, 'labels', label_name)\n label = imageio.imread(label_path)\n image = imageio.imread(path)\n if relabel:\n relabel_vehicles(label)\n relabel_pedestrian(label)\n relabel_background(label)\n if new_labels:\n new_label_20(label)\n new_label_30(label)\n if trim:\n image = image[trim_ind[0]:trim_ind[1]]\n new_label = np.zeros((original_size[1], original_size[0]), dtype=np.uint8)\n new_label[trim_ind[0]:trim_ind[1]] = label[trim_ind[0]:trim_ind[1]]\n label = new_label\n if reshape:\n image = cv2.resize(image, new_shape)\n if preprocess:\n image = image_preprocessing(image, denoise=denoise)\n label = one_hot_label(label, values)\n images.append(image)\n labels.append(label)\n\n images = np.array(images, dtype=np.uint8)\n labels = np.array(labels, dtype=np.uint8)\n yield images, labels, names",
"def divide_labels():\n random.seed(10)\n\n if config.enable_modelarts:\n import moxing as mox\n mox.file.shift('os', 'mox')\n pic_names = os.listdir(config.data_path)\n dic = []\n with open(config.label_path) as f:\n for line in f:\n name = line.split()[1]+'.jpg'\n lst = map(int, line.split()[2:12])\n lst = list(lst)\n score = round(sum([(i+1)*j for i, j in enumerate(lst)])/sum(lst), 7)\n dic.append([name]+line.split()[2:12]+[score])\n df = pd.DataFrame(dic)\n df_new = df[df[0].isin(pic_names)].copy()\n df_new.reset_index(drop=True, inplace=True)\n test_img = random.sample(pic_names, 25597)\n\n test_label = df_new[df_new[0].isin(test_img)].copy()\n train_label = df_new[~df_new[0].isin(test_img)].copy()\n test_label.to_csv(config.val_label_path, header=0)\n train_label.to_csv(config.train_label_path, header=0)",
"def eval(self,i_db=None):\n assert isinstance(i_db, (list, tuple, tf.data.Dataset)), 'Got type: {}'.format(type(i_db))\n labels, preds = [], []\n for index, element in enumerate(i_db):#Processing for every single image\n print('(SegNets) Evaluating index = {}'.format(index))\n assert isinstance(element, (list, tuple, dict))\n if isinstance(element, (list, tuple)):\n image = element[0] # As my design. Shape = (None, height, width, nchannels)\n mask = element[1] # As my design. Shape = (None, height, width, num_classes)\n else:\n image = element['image'] # As my design. Shape = (None, height, width, nchannels)\n mask = element['label'] # As my design. Shape = (None, height, width, num_classes)\n if isinstance(image,(tf.Tensor,tf.SparseTensor)):\n image = image.numpy()\n else:\n assert isinstance(image,np.ndarray)\n if isinstance(mask,(tf.Tensor,tf.SparseTensor)):\n mask = mask.numpy()\n else:\n assert isinstance(mask,np.ndarray)\n \"\"\"Preprocess data\"\"\"\n assert len(image.shape) in (2,3,4),'Got shape: {}'.format(image.shape)\n if len(image.shape) in (2,3):#Single image\n \"\"\"Preprocess image\"\"\"\n if len(image.shape)==2:#Gray image with shape (height,width)\n image = np.expand_dims(image,axis=-1) # Shape: (height, width, 1)\n else:#Shape: (height, width, depth)\n pass\n assert len(image.shape) == 3, 'Got shape: {}'.format(image.shape)\n assert image.shape[-1] in (1, 3), 'Got shape: {}'.format(image.shape)\n \"\"\"Preprocess mask\"\"\"\n assert len(mask.shape) in (2, 3)\n if len(mask.shape) == 2:\n mask = np.expand_dims(mask, -1)\n else:\n assert mask.shape[-1] == 1 #Only gray image with shape (height, width, 1)\n \"\"\"Making batch for single image\"\"\"\n image = np.expand_dims(image,0)\n mask = np.expand_dims(mask,0)\n else:#Batch of images\n assert len(image.shape)== 4 #Shape: (None, height, width, depth)\n assert len(mask.shape) == 4 #Shape: (None, height, width, num_classes)\n assert len(image.shape)==4, 'Got shape: {}'.format(image.shape) #Shape: (None, height, width, depth)\n assert len(mask.shape)==4, 'Got shape: {}'.format(mask.shape) #Shape: (None, height, width, num_classes)\n assert image.shape[-1] in (1, 3), 'Got shape: {}'.format(image.shape)\n cpreds = self.predict(i_image=image) # Shape = (None, height, width, 1) as procesisng single image\n for pindex, cpred in enumerate(cpreds):\n cmask = np.expand_dims(np.argmax(mask[pindex],axis=-1),axis=-1)\n print(cpred.shape,cmask.shape)\n if np.sum(ListTuples.compare(i_x=cmask.shape,i_y=self.mask_shape)):\n cmask = SupFns.scale_mask(i_mask=cmask,i_tsize=self.mask_shape)\n else:\n pass\n labels.append(cmask) # Shape = (height, width, 1)\n preds.append(cpred) # Shape = (height, width, 1)\n print(labels[-1].shape,preds[-1].shape)\n if self.vdebug:\n plt.subplot(1,3,1)\n plt.imshow(image[pindex],cmap='gray')\n plt.title('Original Image')\n plt.subplot(1,3,2)\n plt.imshow(mask[pindex],cmap='gray')\n plt.title('Mask')\n plt.subplot(1,3,3)\n plt.imshow(cpred,cmap='gray')\n plt.title('Prediction')\n plt.show()\n else:\n pass\n \"\"\"Performance measurement\"\"\"\n evaluer = SegMetrics_2D(i_num_classes=self.vnum_classes,i_care_background=self.vcare_background)\n Logs.log('Using entire dataset')\n measures, measure_mean, measure_std = evaluer.eval(i_labels=labels, i_preds=preds,i_object_care=False)\n Logs.log('Measure shape = {}'.format(measures.shape))\n Logs.log('Measure mean = {}'.format(measure_mean))\n Logs.log('Measure std = {}'.format(measure_std))\n Logs.log('Using sub dataset that only consider images containing objects')\n 
measures, measure_mean, measure_std = evaluer.eval(i_labels=labels, i_preds=preds, i_object_care=True)\n Logs.log('Measure shape = {}'.format(measures.shape))\n Logs.log('Measure mean = {}'.format(measure_mean))\n Logs.log('Measure std = {}'.format(measure_std))\n return labels, preds",
"def data_processing(labels_df, x_train, y_train, label_map):\n subset = str()\n\n if labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 16 or labels_df.shape[0] == 64:\n batch_size = 8 ### Modified for smaller images\n subset = \"train\"\n elif labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8:\n batch_size = 4\n subset = \"valid\"\n elif labels_df.shape[0] == 40669:\n batch_size = 4\n subset = \"test\" \n elif labels_df.shape[0] == 20522:\n batch_size = 2\n subset = \"test-add\" \n else:\n raise ValueError('The dataset format is different than expected')\n\n label_map = label_map\n# images_size = (256, 256)\n images_size = (64, 64)\n\n # Iterate through batches of rows of the dataset\n for i in range(labels_df.shape[0]//batch_size):\n \n temp_labels_df = labels_df.iloc[i*batch_size:((i+1)*batch_size) , :]\n \n # Iterate through the samples batch and create x and y for training\n for f, tags in tqdm(temp_labels_df.values, miniters=100):\n # load a .tif file\n img = io.imread('data/{}-jpg/{}.jpg'.format(subset,f)) ######## Modified for train jpg folder\n img = transform.resize(img, images_size)\n\n### Removed for use of JPEG files:\n# # Add NDVI layer // Removed for usage of JPG files\n# np.seterr(all='warn') # divide by zero, NaN values\n# img_ndvi = np.expand_dims((img[:, :, 3] - img[:, :, 2]) / (img[:, :, 3] + img[:, :, 2]), axis=2) # (NIR - RED) / (NIR + RED)\n# img = np.concatenate((img, img_ndvi), axis=2)\n \n # Create the target array for an image\n targets = np.zeros(17)\n for t in tags.split(' '):\n targets[label_map[t]] = 1 \n\n x_train.append(img)\n y_train.append(targets)\n\n # Format values\n y_train = np.array(y_train, np.uint8)\n x_train = np.array(x_train, np.float16) / 255.\n\n### Removed for use of JPEG files: \n# x_train = np.array(x_train, np.float16) / 65536.\n#### x_train -= 0.5\n#### x_train *= 2 \n\n\n # Save subsets in npz files\n np.save('data/{}-npy/npdatasetX{}'.format(subset, i), x_train)\n x_train = []\n np.save('data/{}-npy/npdatasetY{}'.format(subset, i), y_train)\n y_train = []\n #print \"{} data saved\".format(subset)",
"def get_cluster_labeling(model, dataloader, device=torch.device(\"cpu\")):\n model.eval()\n original_labels = []\n cluster_labels = []\n for batch in dataloader:\n images = batch[\"original\"].to(device)\n labels = batch[\"label\"].to(device)\n outputs = model(images, False)\n original_labels += labels.tolist()\n cluster_labels += torch.argmax(outputs, dim=1).tolist()\n return original_labels, cluster_labels",
"def __populate_segmentation_in_label(self, label, annotation, image_details):\n # if bbox comes as normalized, skip normalization.\n if max(annotation[\"bbox\"]) < 1.5:\n width = 1\n height = 1\n else:\n width = image_details[\"width\"]\n height = image_details[\"height\"]\n\n polygons = []\n if (\n type(annotation[\"segmentation\"]) is dict\n ): # segmentations are in uncompressed rle format\n rle = annotation[\"segmentation\"]\n if self.compressed_rle:\n compressed_rle = rle\n else:\n compressed_rle = mask.frPyObjects(rle, rle[\"size\"][0], rle[\"size\"][1])\n polygons = masktools.convert_mask_to_polygon(compressed_rle)\n else: # segmentation is list of vertices\n for segmentation in annotation[\"segmentation\"]:\n polygon = []\n # loop through vertices:\n for id, vertex in enumerate(segmentation):\n if (id % 2) == 0:\n # x-coordinates (even index)\n x = vertex / width\n polygon.append(x)\n\n else:\n y = vertex / height\n polygon.append(y)\n polygons.append(polygon)\n label[\"polygon\"] = polygons",
"def load_label(self, idx):\n\t\"\"\"\n label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.part_dir, idx))['LabelMap']\n label = np.zeros_like(label_400, dtype=np.uint8)\n for idx, l in enumerate(self.labels_59):\n #idx_400 = self.labels_400.index(l) + 1\n #label[label_400 == idx_400] = idx + 1\n label = label[np.newaxis, ...]\n\t\"\"\"\n\tseg_im = Image.open('{}/segmentations/{}/{}/{}.jpg'.format(self.part_dir, self.obj_cls, \n\t\tself.part, idx))\n label = np.array(seg_im, dtype=np.uint8)\n #label = label[:,:,::-1]\n #label -= self.mean\n #label = label.transpose((2,0,1))\n\tlabel = label[np.newaxis, ...]\n return label",
"def visualize_training(self,batched_inputs, results):#image,heatmap):#,\n from pointscollection.utils import exVisualizer as Visualizer\n from detectron2.data.detection_utils import convert_image_to_rgb\n\n\n assert len(batched_inputs) == len(\n results\n ), \"Cannot visualize inputs and results of different sizes\"\n # storage = get_event_storage()\n max_boxes = 20\n\n image_index = 0 # only visualize a single image\n img = batched_inputs[image_index][\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), \"BGR\")\n print(batched_inputs[0]['file_name'],batched_inputs[0]['image_id'])\n\n\n # v_gt = Visualizer(img, None)\n # # v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\n # anno_img = v_gt.get_image()\n processed_results = _postprocess(results[image_index], img.shape[0], img.shape[1])\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\n predicted_mask = processed_results.pred_masks.detach().cpu().numpy()\n predicted_points=processed_results.pred_points.detach().cpu().numpy()\n\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes],masks=predicted_mask[0:max_boxes],points=predicted_points[0:max_boxes])\n prop_img = v_pred.get_image()\n vis_img =prop_img# np.vstack((anno_img, prop_img))\n # vis_img = vis_img.transpose(2, 0, 1)\n # vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\n # plt.imshow(vis_img)\n # plt.show()\n plt.imsave('output/result_show/{:0>12}.png'.format(batched_inputs[0]['image_id']),vis_img)\n \n\n # storage.put_image(vis_name, vis_img)\n # img = image[0]\n # img=img*self.pixel_std+self.pixel_mean\n # img = convert_image_to_rgb(img.permute(1, 2, 0), \"BGR\")\n # ht=heatmap[0]\n # ht=torch.sigmoid(ht)\n\n # ht=ht.cpu().numpy()\n # ht=np.max(ht,axis=0)\n # plt.imshow(np.uint8(img))\n # plt.show()\n # plt.imshow(ht)\n # plt.show()",
"def make_label(self, image_paths, mask_paths, image_spxl_paths, mode=\"gray\"):\n for spxl_path in tqdm(image_spxl_paths, total=len(image_spxl_paths)):\n spxlname = spxl_path.split(\"\\\\\")[-1][\n : spxl_path.split(\"\\\\\")[-1].rindex(\"_\")\n ]\n for image_path, mask_path in zip(image_paths, mask_paths):\n\n imagename = image_path.split(\"\\\\\")[-1].split(\".\")[0]\n maskname = mask_path.split(\"\\\\\")[-1].split(\".\")[0]\n\n if spxlname == imagename and spxlname == maskname:\n\n logger.info(\n f\"{spxlname}(superpixel name) matches {imagename}(image) and {maskname}(mask)\"\n )\n folder = spxl_path.split(\"_\")[-1].split(\".\")[0]\n\n logger.info(\"[Loading images]\")\n image = cv2.imread(image_path)\n mask = cv2.imread(mask_path, 0)\n slic_image = joblib.load(os.path.join(spxl_path))\n\n logger.info(\"[Separating segments....]\")\n if mode == \"RGB\":\n superpixels = self.individual_superpixel(\n slic_image, image, mode\n )\n else:\n superpixels = self.individual_superpixel(\n slic_image, image, mode\n )\n\n for i, spxl in tqdm(enumerate(superpixels), total=len(superpixels)):\n\n # np.save(f'Labels\\\\RGB_superpixels\\\\{spxlname}_{folder}_{i+1}_binary.npy', spxl)\n # hkl.dump(spxl, f'Labels\\\\hickle_labels\\\\{spxlname}_{folder}_{i + 1}_binary.hkl')\n\n res = spxl * mask\n\n if len(np.unique(res)) >= 2:\n\n # np.save(f'Labels\\\\RGB_superpixels\\\\{spxlname}_{folder}_{i + 1}_multiclass.npy', spxl)\n # np.savetxt(f'Labels\\\\mask_ground_truth\\\\{spxlname}_{folder}_{i + 1}_gt.txt', res)\n logger.info(\n f\"Labels\\\\mask_ground_truth\\\\{spxlname}_{folder}_{i + 1}_gt.npy [SAVED]\"\n )\n # hkl.dump(spxl, f'Labels\\\\hickle_labels\\\\{spxlname}_{folder}_{i + 1}_multiclass.hkl')\n\n self.y.append(1)\n\n if folder == \"Bacterial leaf blight\":\n self.mcy.append(0)\n logger.info(\n f\"Superpixel {i + 1} is diseased with Bacterial Leaf blight\"\n )\n\n if folder == \"Brown spot\":\n self.mcy.append(1)\n logger.info(\n f\"Superpixel {i + 1} is diseased with Brown spot\"\n )\n\n if folder == \"Leaf smut\":\n self.mcy.append(2)\n logger.info(\n f\"Superpixel {i + 1} is diseased with Leaf smut\"\n )\n\n else:\n self.y.append(0)\n\n logger.info(f\"{spxlname} done \\n\")\n\n logger.info(\"[SAVING hickle files] .....\")\n\n # np.save(f'Labels\\\\RGB_superpixels\\\\binary_labels(RGB).npy', self.y)\n # hkl.dump(f'Labels\\\\hickle_labels\\\\binary_labels(RGB).hkl', self.y)\n\n logger.info(\"[Binary classification superpixels and labels have been saved]\")\n\n # np.save(f'Labels\\\\RGB_superpixels\\\\multiclass_labels(RGB).npy', self.mcy)\n # hkl.dump(f'Labels\\\\hickle_labels\\\\binary_labels(RGB).hkl', self.mcy)\n\n logger.info(\n \"[Multiclass classification superpixels and labels have been saved]\"\n )",
"def unbatchify(data):\n images_ = []\n labels_ = []\n\n for image, label in data.unbatch().as_numpy_iterator():\n images_.append(image)\n labels_.append(unique_breeds[np.argmax(label)])\n return images_, labels_",
"def getPrediction(nnOutput):\n\treturn [nnOutput, 1.0]",
"def set_label(self, label):\n # check label makes sense\n if not isinstance(label, np.ndarray):\n raise TypeError(\"Label should be numpy array\")\n\n # only fill in attribute if the right size\n if label.shape[0] == self.points.shape[0]:\n self.sem_label = (label // 1000).astype(np.uint8) # semantic label in lower half\n self.inst_label = (label % 1000).astype(np.uint8) # instance id in upper half\n cls, cnt = np.unique(self.sem_label, return_counts=True)\n unknown_clss = [9,12,18,22]\n for unknown_cls in unknown_clss:\n if unknown_cls in np.unique(self.sem_label):\n print(unknown_cls, cnt[cls==unknown_cls])\n else:\n print(\"Points shape: \", self.points.shape)\n print(\"Label shape: \", label.shape)\n raise ValueError(\"Scan and Label don't contain same number of points\")\n\n # sanity check\n assert((self.inst_label + (self.sem_label * 1000) == label).all())\n\n # self.augmentor()\n\n if self.project:\n self.do_label_projection()",
"def get_train_batch(batch_size=12):\n \n image_paths = glob(os.path.join(data_dir, 'images', '*.jpg'))\n if shuffle:\n random.shuffle(image_paths)\n for i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n names = []\n for path in image_paths[i:i+batch_size]:\n image_name = os.path.basename(path)\n names.append(image_name)\n label_name = image_name[:-4] + '_train_id.png'\n label_path = os.path.join(data_dir, 'labels', label_name)\n label = imageio.imread(label_path)\n image = imageio.imread(path)\n if relabel:\n relabel_vehicles(label)\n relabel_pedestrian(label)\n relabel_background(label)\n if new_labels:\n new_label_20(label)\n new_label_30(label)\n if trim:\n image = image[trim_ind[0]:trim_ind[1]]\n label = label[trim_ind[0]:trim_ind[1]]\n if reshape:\n image = cv2.resize(image, new_shape)\n label = cv2.resize(label, new_shape, interpolation=cv2.INTER_NEAREST)\n if preprocess:\n image = image_preprocessing(image, denoise=denoise)\n label = one_hot_label(label, values)\n images.append(image)\n labels.append(label)\n \n images = np.array(images, dtype=np.uint8)\n labels = np.array(labels, dtype=np.uint8)\n \n yield images, labels, names",
"def nearest_neighbor_classify(train_image_feats,\n train_labels,\n test_image_feats,\n k=3):\n\n pred_labels = []\n\n #############################################################################\n # TODO: YOUR CODE HERE\n #############################################################################\n M, d = test_image_feats.shape\n dist = pairwise_distances(test_image_feats, train_image_feats)\n for i in range(M):\n sorted = np.argsort(dist[i])\n sorted = sorted[1:]\n list = [train_labels[i] for i in sorted[:k]]\n list_set = set(list)\n n = list.count\n pred_labels.append(max(list_set, key = n))\n \n #############################################################################\n # END OF YOUR CODE\n #############################################################################\n return pred_labels",
"def get_inference_image(self):\n for detection in self.cvOut[0,0,:,:]:\n score = float(detection[2])\n if score > self.Threshold:\n left = int(detection[3] * self.cols)\n top = int(detection[4] * self.rows)\n right = int(detection[5] * self.cols)\n bottom = int(detection[6] * self.rows)\n\n # Draw the bounding-box on the image\n cv2.rectangle(self.result_image,(left, top),(right, bottom), (23, 230, 210), thickness=2)\n cv2.drawMarker(self.result_image,get_rect_centre(left, top,right, bottom),(255,0,0))\n cv2.putText(self.result_image, self.label_dict[int(detection[1])] + \" : \" + str(round(score,4)),\\\n (int(left-10),int(top-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 2)\n\n print(\"[INFO] Result image generated successfully.\")\n return self.result_image",
"def get_minibatch(roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = npr.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images)\n # rois_per_image = cfg.TRAIN.ROIS_PER_IM\n\n # Get the input image blob, formatted for caffe\n # im_crops is define as RoIs with form (y1,x1,y2,x2)\n im_blob, im_scales, im_crops, im_shapes = _get_image_blob(\n roidb, random_scale_inds)\n\n # row col row col to x1 y1 x2 y2\n im_crops = np.array(im_crops, dtype=np.uint16)\n im_crops = im_crops[:, (1, 0, 3, 2)]\n\n blobs = {'data': im_blob}\n\n # Now, build the region of interest and label blobs\n rois_blob = np.zeros((0, 5), dtype=np.float32)\n rois_context_blob = np.zeros((0, 9), dtype=np.float32)\n rois_frame_blob = np.zeros((0, 9), dtype=np.float32)\n rois_scores_blob = np.zeros((0, 1), dtype=np.float32)\n roi_num_blob = np.zeros((0, 1), dtype=np.float32)\n labels_blob = np.zeros((0, num_classes), dtype=np.float32)\n cpg_filter_blob = np.zeros((0, num_classes), dtype=np.float32)\n cpg_io_blob = np.zeros((0, 1), dtype=np.float32)\n for i_im in xrange(num_images):\n # x1 y1 x2 y2\n im_rois = roidb[i_im]['boxes'].astype(np.float32)\n im_labels = roidb[i_im]['gt_classes']\n if cfg.USE_ROI_SCORE:\n im_roi_scores = roidb[i_im]['box_scores']\n\n im_crop = im_crops[i_im]\n\n # Check RoI\n datasets.ds_utils.validate_boxes(\n im_rois, width=im_shapes[i_im][1], height=im_shapes[i_im][0])\n\n rois_per_this_image = np.minimum(cfg.TRAIN.ROIS_PER_IM,\n im_rois.shape[0])\n im_rois = im_rois[:rois_per_this_image, :]\n if cfg.USE_ROI_SCORE:\n im_roi_scores = im_roi_scores[:rois_per_this_image]\n\n if cfg.TRAIN.CPG_CACHE:\n filter_blob_this = np.zeros(\n (rois_per_this_image, num_classes), dtype=np.float32)\n for target_size in cfg.TRAIN.SCALES:\n if target_size == cfg.TRAIN.SCALES[random_scale_inds[i_im]]:\n continue\n filter_name = str(db_inds[i_im] * 10000 + target_size)\n # print filter_name\n filter_path = os.path.join(cfg.TRAIN.CPG_CACHE_PATH,\n filter_name)\n\n if os.path.exists(filter_path):\n filter_this = cpg.cpg_utils.binaryfile_to_blobproto_to_array(\n filter_path).astype(np.float32)\n # filter_blob_this = np.logical_or(\n # filter_blob_this,\n # cpg.cpg_utils.binaryfile_to_blobproto_to_array(filter_path)).astype(np.float32)\n # filter_blob_this = np.add(\n # filter_blob_this,\n # cpg.cpg_utils.binaryfile_to_blobproto_to_array(filter_path)).astype(np.float32)\n filter_blob_this = np.maximum(filter_blob_this,\n filter_this)\n io_blob_this = np.array(\n [\n db_inds[i_im] * 10000 +\n cfg.TRAIN.SCALES[random_scale_inds[i_im]]\n ],\n dtype=np.float32)\n\n cpg_filter_blob = np.vstack((cpg_filter_blob, filter_blob_this))\n cpg_io_blob = np.vstack((cpg_io_blob, io_blob_this))\n\n if cfg.TRAIN.ROI_AU:\n offset = 1.0 / im_scales[i_im] / cfg.SPATIAL_SCALE\n offset_step = cfg.TRAIN.ROI_AU_STEP\n\n if cfg.TRAIN.CPG_CACHE:\n filter_blob_this_sum = np.sum(filter_blob_this, 1)\n au_ind = filter_blob_this_sum == 0\n else:\n au_ind = np.ones(rois_per_this_image, dtype=np.bool)\n offsets = np.random.randint(\n 2 * offset_step + 1, size=(np.sum(au_ind),\n 4)).astype(np.float32)\n offsets -= offset_step\n offsets *= offset\n\n au_rois_o = im_rois[au_ind]\n au_rois_n = im_rois[~au_ind]\n au_rois = au_rois_o + offsets\n\n keep = datasets.ds_utils.filter_validate_boxes(\n au_rois, im_shapes[i_im][1], im_shapes[i_im][0])\n au_rois[~keep] = au_rois_o[~keep]\n\n ovrs = datasets.ds_utils.overlaps(au_rois, au_rois_n)\n thresholded = ovrs >= 0.5\n keep = 
np.sum(thresholded, 1) == 0\n au_rois[~keep] = au_rois_o[~keep]\n\n # im_rois = np.vstack((im_rois, au_rois))\n im_rois[au_ind] = au_rois\n\n # rois_per_this_image = np.minimum(cfg.ROIS_PER_IM, im_rois.shape[0])\n # im_rois = im_rois[:rois_per_this_image, :]\n # if cfg.USE_ROI_SCORE:\n # au_roi_scores = im_roi_scores[au_ind]\n # im_roi_scores = np.vstack((im_roi_scores, au_roi_scores))\n # im_roi_scores = im_roi_scores[:rois_per_this_image]\n\n # roidb[i_im]['boxes'] = im_rois\n\n if cfg.CONTEXT:\n im_inner_rois, im_outer_rois = get_inner_outer_rois(\n im_rois, cfg.CONTEXT_RATIO)\n\n # project\n rois = _project_im_rois(im_rois, im_scales[i_im], im_crop)\n if cfg.CONTEXT:\n rois_inner = _project_im_rois(im_inner_rois, im_scales[i_im],\n im_crop)\n rois_outer = _project_im_rois(im_outer_rois, im_scales[i_im],\n im_crop)\n\n batch_ind = i_im * np.ones((rois.shape[0], 1))\n rois_blob_this_image = np.hstack((batch_ind, rois))\n rois_blob = np.vstack((rois_blob, rois_blob_this_image))\n if cfg.CONTEXT:\n rois_context_blob_this_image = np.hstack((batch_ind, rois_outer,\n rois))\n rois_context_blob = np.vstack((rois_context_blob,\n rois_context_blob_this_image))\n\n rois_frame_blob_this_image = np.hstack((batch_ind, rois,\n rois_inner))\n rois_frame_blob = np.vstack((rois_frame_blob,\n rois_frame_blob_this_image))\n\n if cfg.USE_ROI_SCORE:\n rois_scores_blob = np.vstack((rois_scores_blob, im_roi_scores))\n else:\n rois_scores_blob = np.vstack((rois_scores_blob, np.zeros(\n (rois_per_this_image, 1), dtype=np.float32)))\n\n # Add to labels\n if cfg.USE_BG:\n im_labels = np.hstack((im_labels, [1.0]))\n labels_blob = np.vstack((labels_blob, im_labels))\n\n im_roi_num = np.ones((1))\n im_roi_num[0] = rois.shape[0]\n roi_num_blob = np.vstack((roi_num_blob, im_roi_num))\n\n # For debug visualizations\n # _vis_minibatch(im_blob, rois_blob, labels_blob)\n\n blobs['roi'] = rois_blob\n if cfg.CONTEXT:\n blobs['roi_context'] = rois_context_blob\n blobs['roi_frame'] = rois_frame_blob\n\n if cfg.USE_ROI_SCORE:\n # n * 1 to n\n blobs['roi_score'] = np.add(\n np.reshape(rois_scores_blob, [rois_scores_blob.shape[0]]), 1)\n else:\n blobs['roi_score'] = np.ones((rois_blob.shape[0]), dtype=np.float32)\n\n blobs['roi_num'] = roi_num_blob\n\n blobs['label'] = labels_blob\n\n if cfg.TRAIN.CPG_CACHE:\n blobs['cpg_filter'] = cpg_filter_blob\n blobs['cpg_io'] = cpg_io_blob\n\n # print \"rois_blob: \", rois_blob\n # print \"rois_context_blob: \", rois_context_blob\n # print \"rois_frame_blob: \", rois_frame_blob\n # print \"rois_scores_blob: \", rois_scores_blob\n # print \"labels_blob: \", labels_blob\n\n if cfg.TRAIN.ROI_AU:\n return blobs, roidb\n return blobs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Saves the current session's tweets to tweets.json.
|
def save_tweets():
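    # Write the session's tweet store out to tweets.json (the session here is presumably Flask's).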
with open("tweets.json", "w") as outfile:
json.dump(session["tweets"], outfile)
|
[
"def save_tweet(self,tweet):\n with open(self.tweets_file, \"ab\") as output:\n output.write(tweet['id']+','+tweet['created']+','+tweet['text']+','+tweet['retweet_count']+','+tweet['favorite_count']+','+tweet['lang']+','+tweet['country']+','+tweet['city']+','+tweet['province']+'\\n')\n self.file_line_counter+=1\n self.on_save_tweet()",
"def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",axis=1)\n\t\twrite_to_file(self.file_path,self.retweet_df,self.screen_name)\n\t\tprint(\"--- done for {} ---\".format(screen_name))",
"def _save_tweet_data(self, index=0, tweet=None):\n all_tweet_data = dict()\n #Prepare all_tweet_data; attempt to load existing data\n if os.path.exists(self.config.filenames['stats']): #Load existing data\n with open(self.config.filenames['stats'], 'r', encoding=\"utf8\") as infile:\n all_tweet_data = json.load(infile)\n copyfile(self.config.filenames['stats'], self.config.filenames['stats'] + \".bak\")\n else:\n all_tweet_data = {\"feed_index\": 0}\n #Edit all_tweet_data\n if BotFunctions.SaveTweetIndex in self.functionality and index > 0:\n all_tweet_data['feed_index'] = index\n if BotFunctions.SaveTweetStats in self.functionality and tweet:\n if tweet.author.id == self.config.my_id: #Bot tweeted this\n all_tweet_data['tweet_stats'][tweet.id]['title'] = tweet.title\n #Save all_tweet_data to config.filenames['stats']\n with open(self.config.filenames['stats'], 'w', encoding=\"utf8\") as outfile:\n json.dump(all_tweet_data, outfile)",
"def save_all(self):\n\t\tfor tweet in self.list_of_tweets:\n\t\t\tself.__save_tweet(tweet)\n\t\tself.list_of_tweets = []\n\n\t\tlog.info(\"Save all tweets\")\n\n\t\tfor user in self.list_of_users:\n\t\t\tself.__save_user(user)\t\n\t\tself.list_of_users = []\n\n\t\tlog.info(\"Save all users\")",
"def save_tweet(self,tweet):\n print '.',\n return",
"def save(self, request, response, pathmatch):\n\n if 'user' not in request.session:\n raise StopProcessing(400, \"You need to be logged in.\")\n\n try:\n status = request.params['status']\n except KeyError:\n raise StopProcessing(500, \"No status given.\")\n\n try:\n tweets = Tweets(self.db_connection)\n tweets.createTweet(request.session['user'].fullname, status)\n except IOError:\n raise StopProcessing(500, \"Unable to connect to data file.\")\n\n d = {'tweets': self.getTweets(), 'message': 'Great! Now the world knows.', 'user': request.session['user']}\n response.send_template('minitwitter.tmpl', d)",
"def retweet():\n tw_id = request.args.get(\"tweet\")\n\n tws = session[\"tweets\"]\n tws[tw_id][\"retweet_time\"] = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n tws[tw_id][\"retweeter\"] = session[\"user\"]\n\n session[\"tweets\"] = tws\n save_tweets()\n\n return redirect(\"/personal_feed\")",
"def __save(self):\n self.log.debug(\"SAVING TWEETS\")\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = []\n for tweet in self.response.get('data', []):\n if not self.mongodb.is_in(tweet['id']):\n self.log.debug(tweet)\n # process each tweet ib parallel\n fut = executor.submit(util.pre_process_tweets_response, tweet, self.response['includes'])\n fut.add_done_callback(self.__save_callback)\n futures.append(fut)\n else:\n # if the tweet is already in the db not save it and update the value of the number of tweets saved.\n self.total_result -= 1\n for job in tqdm(as_completed(futures), total=len(futures), desc=\"INFO:SEARCH:SAVING\", leave=False, position=1):\n pass\n\n self.mongodb.save_many(self._all)\n # clean the list populate with these tweets processed.\n self._all = []",
"def save_tweet(sentence):\n tweet = {\n 'tweet_content': sentence,\n 'created_at': datetime.now(),\n }\n tweet_id = tweet_coll.insert_one(tweet).inserted_id\n\n # ADD PIECE THAT TWEETS IT OUT\n # Create a tweet\n api.update_status(status =sentence)\n\n sentence = markovChain.random_walk(random.randint(2, 20))\n return redirect(url_for('index', sentence=sentence))",
"def write_to_json(twitter_profiles, recent_tweets, users, filename):\n twitter_profiles_json_str = json.dumps(twitter_profiles, indent = 4, sort_keys=True, ensure_ascii=True)\n recent_tweets_json_str = json.dumps(recent_tweets, indent = 4, sort_keys=True, ensure_ascii=True)\n users_json_str = json.dumps(users, indent = 4, sort_keys=True, ensure_ascii=True)\n # write json if needed\n with open('users/users_' + str(datetime.datetime.now()).replace('-','').replace(' ','') + \"_\" + args.file.split('/')[-1], 'w') as f:\n f.write(users_json_str)\n with open('recent_tweets/recent_tweets_' + str(datetime.datetime.now()).replace('-','').replace(' ','') + \"_\" + args.file.split('/')[-1], 'w') as f:\n f.write(recent_tweets_json_str)\n with open('user_profiles/user_profiles_' + str(datetime.datetime.now()).replace('-','').replace(' ','') + \"_\" + args.file.split('/')[-1], 'w') as f:\n f.write(twitter_profiles_json_str)\n print 'json files have been written.'",
"def save_movies():\n with open(\"json/movies.json\", \"w+\") as movies_file:\n json.dump(movies, movies_file)",
"def tweet_out(self, tweet):\n for k in query_db('SELECT * ' \\\n 'FROM accounts ' \\\n 'INNER JOIN tweetswarmaccount '\\\n 'ON account.access_token=tweetswarmaccount.account_id '\\\n 'WHERE tweetswarmaccount.tweetswarm=?', ([self.id])\n ):\n s = Account()\n s.access_key = k['access_token']\n s.access_secret = k['access_secret']\n self.slaves.append(s)\n for slave in self.slaves:\n slave.tweet(tweet)\n\n query_db('UPDATE tweetswarms' \\\n 'SET lasttweeted=?' \\\n 'WHERE id=?' ([tweet, self.id])\n )\n g.db.commit()\n return True",
"def save(self):\n\n if self.authorized:\n PyFunceble.helpers.Dict(self.tracker).to_json_file(self.hashes_file)\n\n PyFunceble.LOGGER.info(f\"Saved tracked into {self.hashes_file!r}.\")",
"def save_feeds():\n with open(os.path.join(__location__, 'feeds.json'), 'w') as f:\n json.dump(feeds_dict, f, indent=4)",
"def write_tweets(tweets, outfile):\n # Create an outout file\n f = open(outfile, 'w')\n # For each tweet in the array of tweets write it out to the output file\n for tweet in tweets:\n # write each dictionary plus a new line character\n f.write(str(tweet) + '\\n')\n # Close the file\n f.close()",
"def update_tweet_stats(self, tweet):\n self._save_tweet_data(tweet=tweet)",
"def save():\n # Open todos.json in write mode and dump the todos into it\n with open('todos.json', 'w') as f:\n json.dump(todos, f)\n\n # Add an alert message to the session to be displayed on the page\n session[\"alert\"] = {\n \"level\": \"success\",\n \"message\": \"To-Do list saved!\"\n }\n\n # Redirect back to the index\n return redirect(\"/\")",
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def add_tweet_data(tweet):\n req_url = '{0}{1}'.format(eleanor_url, 'add-tweet-data')\n headers = {'content-type': 'application/json'}\n payload = json.dumps(tweet)\n requests.post(req_url, headers=headers, data=payload)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes a tweet from the server. Requires the current user to be logged in and to be the author of the tweet being deleted.
|
def delete_tweet():
tw_id = request.args.get("tweet")
global_feed = request.args.get("global")
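    # Remove the tweet from the session-backed store and persist the change to tweets.json.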
tws = session["tweets"]
tws.pop(tw_id)
session["tweets"] = tws
save_tweets()
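    # Send the user back to whichever feed the delete request came from.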
if global_feed == "True":
return redirect("/global_feed")
else:
return redirect("/personal_feed")
|
[
"def delete_tweet(self, id=None):\n try:\n # Destroy the status object with the id equal to the passed id\n self.api.destroy_status(id=str(id))\n\n except Exception as e:\n print(str(e))\n sys.exit(0)",
"def remove_tweet(cls, cursor, tweet_id):\n cursor.execute(\n \"\"\"\n DELETE FROM tweets\n WHERE id=%s\n \"\"\",\n (tweet_id,)\n )",
"def delete_all(self, tweets=None, verbose=False):\n if tweets is None:\n raise Exception(\"Error: Invalid 'tweets' object passed\")\n count = len(tweets)\n if count == 0:\n print(\"Status: No tweets to delete!!\")\n sys.exit(0)\n\n if verbose:\n n = 1\n print(\"\\nFiltered Tweets:\")\n for tweet in tweets.values():\n print(\"Tweet %d\" % n)\n n += 1\n for keys, values in tweet.items():\n print(\"{} : {}\".format(keys, values))\n\n confirmation = input(\n \"\\n{} tweets will be deleted. Press (y/n) to confirm: \".format(count)\n ).lower()\n if confirmation == \"n\":\n sys.exit(0)\n elif confirmation == \"y\":\n for id in tweets.keys():\n self.delete_tweet(id)\n print(\"Status: Tweet with ID: {} deleted!\".format(id))\n\n print(\"Status: {} tweets deleted successfully!\".format(count))",
"def delete(self):\n self.api._post('users/delete', user_id=self.user_id)",
"def turtles_delete(tid):\n db = request.db\n Turtle = db.tables.Turtle\n res = Turtle.query.filter_by(id=tid).first_or_404()\n if res.name == 'Timmy':\n return \"Undeletable\\n\", 401\n db.session.delete(res)\n db.session.commit()\n return \"\"",
"def removeCorpus(request):\n corpusid = request.GET[\"corpusid\"] if request.method == \"GET\" else request.POST[\"corpusid\"]\n session = Session.objects.all().filter(id=corpusid).first()\n folder = os.path.join(settings.BASE_PROJECT_DIR, session.folder)\n\n manager = TweetIO.getManager()\n fetcher = manager.get(corpusid)\n\n class OnCancelListener(TweetIO.FetcherProgressListener):\n def onCancel(self):\n shutil.rmtree(folder)\n session.delete()\n if str(corpusid) in manager.fetchers:\n manager.fetchers.pop(str(corpusid))\n\n if fetcher is not None:\n fetcher.addListener(OnCancelListener())\n fetcher.cancel()\n else:\n shutil.rmtree(folder)\n session.delete()\n\n return HttpResponse(\"success\")",
"def delete(self, password, message=\"\"):\n data = {'user': self.user.name,\n 'passwd': password,\n 'delete_message': message,\n 'confirm': True}\n return self.request_json(self.config['delete_redditor'], data=data)",
"def delete_user(id):\n return u.delete(id)",
"def delete(self, user_id: Union[UUID, str],\n token_id: Union[UUID, str]) -> NoReturn:\n raise NotImplementedError()",
"def delete_user_by_username(self, userName):\n return self.make_request(\"/users/{0}\".format(userName), method='DELETE')",
"def delete_blog_post(request, blog_post_id):\n if request.user.is_superuser:\n blog_post = get_object_or_404(BlogPost, pk=blog_post_id)\n blog_post.delete()\n messages.info(request, 'Blog post deleted!')\n else:\n messages.error(request, 'Sorry, you do not have permission for that.')\n return redirect(reverse('home'))\n\n return redirect(reverse('blog'))",
"def delete_user(self, instance, name):\r\n return instance.delete_user(name)",
"def delete(self, uuid):\n\n #take current session user\n current_user = user_return()\n #if requested user status admin\n if current_user.is_admin:\n #take current user form view\n user = db.session.query(UserModel).filter_by(uuid = uuid).first()\n #delete user\n db.session.delete(user)\n db.session.commit()\n flash(\"Deletion was successful\",category='success')\n else:\n #current session user status is not admin\n flash(\"No permission for this page\",category='danger')\n return redirect(url_for('main'))",
"def destroy(self):\n\n todo = Todo.find(self.request.param('id'))\n todo.delete()\n return self.request.redirect('/todo')",
"def delete_favourite():\n if request.method == \"POST\":\n user_id = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"_id\"]\n favourite = request.form.get(\"wine_id\")\n\n mongo.db.users.update({\"_id\": ObjectId(user_id)}, {\"$pull\":\n {'favourites': {\"wine_id\": favourite}}})\n\n flash(\"Wine has now been removed from your favourites\")\n return redirect(url_for('profile'))",
"def delete(self, thread_id):\n # Send request\n response = self._delete(\n url=\"/threads/id/{thread_id}\".format(thread_id=thread_id)\n )\n\n return response",
"def remove(self):\n url = API_PATH['live_remove_update'].format(id=self.update.thread.id)\n data = {'id': self.update.fullname}\n self.update.thread._reddit.post(url, data=data)",
"def delete_post():\n post = mongo.db.Posts\n _id = request.json['_id']\n post.delete_one({'_id': ObjectId(_id)})\n\n return jsonify({'result': \"Post deleted!\"})",
"def delete(self, id):\n\n # Fetch Favourite\n favourite = Favourite.query.filter_by(id=id).first()\n if favourite is None:\n return { 'message': 'Favourite does not exist'}, 404\n\n # Check User permission\n current_user = flask_praetorian.current_user()\n if favourite.user_id != current_user.id:\n return { 'message': 'Unauthorized to delete Favourite'}, 401\n \n try:\n db.session.delete(favourite)\n db.session.commit()\n except Exception:\n return { 'message': 'Unable to delete Favourite'}, 500\n \n return { 'message': 'Favourite deleted successfully' }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Renders the global Tweeter feed, in chronological order of most recent retweet or time of posting. The feed is global because it includes tweets posted by all users.
|
def global_feed():
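    # Render the global feed template with every stored tweet; include the current user's
    # details (loaded from users.json, presumably the app's user store) only when logged in.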
if "user" in session:
return render_template("global_feed_template.html",
tweets=Tweets(session["tweets"]),
user=session["user"],
users=json.load(open("users.json")),
title="Global Feed")
else:
return render_template("global_feed_template.html",
tweets=Tweets(session["tweets"]),
user="",
title="Global Feed")
|
[
"def retweet():\n tw_id = request.args.get(\"tweet\")\n\n tws = session[\"tweets\"]\n tws[tw_id][\"retweet_time\"] = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n tws[tw_id][\"retweeter\"] = session[\"user\"]\n\n session[\"tweets\"] = tws\n save_tweets()\n\n return redirect(\"/personal_feed\")",
"def feed():\n scrape_data()\n with open(os.path.join(REFERENCE_DIR, \"profile_spider.json\"), \"r\") as f:\n twimages = json.load(f)\n\n status = tweets()\n twimages.extend(t for t in tweets())\n shuffle(twimages)\n\n return render_template('feed.html', twimages=twimages, status=status)",
"def getNewerTweets():\n recent = True\n Searcher.set_twitter_connection(login, TWITTER_CREDENTIALS)\n Searcher.run(search_terms, limit, recent, REST)",
"def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",axis=1)\n\t\twrite_to_file(self.file_path,self.retweet_df,self.screen_name)\n\t\tprint(\"--- done for {} ---\".format(screen_name))",
"def collect_random_tweets(self):\n print(\"Random tweet search started\")\n query = \"en OR of OR is OR het OR de\"\n while True:\n try:\n for statuses in tweepy.Cursor(self.api.search, q=query, lang='nl').pages():\n for status in statuses:\n print(status)\n #self._save_tweet(status=status)\n except tweepy.TweepError as e:\n print(\"Error in random tweets: {}\".format(e))\n self.authenticate()\n continue\n print(\"Random tweet search ended\")",
"def do_tweets(self):\n http = httplib2.Http()\n if self.callsign:\n url = \"http://search.twitter.com/search.json?q=%s+from:%s\" % (urllib.quote('#' + self.callsign), urllib.quote(self.master))\n else:\n url = \"http://search.twitter.com/search.json?q=from:%s\" % (urllib.quote(self.master))\n resp, content = http.request(url, \"GET\")\n d = json.loads(content)\n for j in d['results']:\n if j['id_str'] == self.lasttweeted:\n return\n else:\n self.tweet_out(j['id_str'])",
"def display_trends():\n #setting the input to the list returned from GetTrendsCurrent()\n trends = api.GetTrendsWoeid(woeid=23424977, exclude=None)\n #for the list of objects trends, provide the name and url attribute to the\n top_tweets = []\n for trend in trends:\n top_tweets.append((trend.name, trend.url))\n top_tweets = top_tweets[:5]\n return top_tweets",
"def fetch_tweets(self):\n for tweet in tweepy.Cursor(\n self.twitter_api.search_full_archive,\n environment_name=self.environment_name,\n query=self.hashtag,\n fromDate=self.start_date,\n toDate=self.end_date\n ).items(self.number_of_tweets_to_pull):\n self.tweets_list.append(\n [tweet.created_at,\n tweet.text.encode(\"utf-8\"),\n self.today_datetime]\n )",
"def get_latest_tweets():\n tweet = twitter.Api(consumer_key=config.twitter_consumer_key, \n consumer_secret = config.twitter_consumer_secret, \n access_token_key = config.twitter_access_key, access_token_secret = config.twitter_access_secret)\n red = redis.Redis(host = 'localhost', db = config.subfeed_db)\n unique_new_list = []\n liverpool_tweet_list = tweet.GetUserTimeline(screen_name = config.twitter_screen_name, count = config.twitter_limit)\n twitter_key = \"lfc_twitter\"\n for lfctweet in liverpool_tweet_list:\n current_time = int(time.time()) \n present_in_db = red.zadd(twitter_key, lfctweet.id, current_time)\n if present_in_db == 1:\n twitter_url = \"https://www.twitter.com/\" + config.twitter_screen_name + \"/status/\" + str(lfctweet.id)\n unique_new_list.append(twitter_url)\n return unique_new_list",
"def trendingTweets():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for tweet in trending_topics:\n util.safe_print(tweet.GetText())",
"def fortify_tm_with_previous_posts(handles, api, max_tweets=100, save_path=''):\n\n engagements = []\n users = []\n for handle in handles:\n try:\n tweets, user = api.get_user_tweets(username=handle, max_number=max_tweets)\n print(user['screen_name'])\n users += [user]\n at_mentions = []\n reply_to = []\n retweets = []\n for tweet in tweets:\n try:\n user_mention_blocks = tweet['entities']['user_mentions']\n for block in user_mention_blocks:\n at_mentions += [block['id']]\n except Exception as e:\n pass\n try:\n if tweet['in_reply_to_user_id']:\n reply_to += [tweet['in_reply_to_user_id']]\n except Exception as e:\n pass\n try:\n retweets += [tweet['retweeted_status']['user']['id']]\n except Exception as e:\n pass\n engagements.append(at_mentions + reply_to + retweets)\n except Exception as e:\n print(e)\n\n\n target_market_arr = []\n for user in users:\n target_market_arr += [api.parse_user_to_twitter_user(user)]\n\n target_market = create_twitter_user_df(target_market_arr)\n target_market['Engagements in Past 100 Tweets'] = engagements\n\n target_market = target_market[target_market['Engagements in Past 100 Tweets'].astype(str) != '[]']\n\n TM_SIZE = len(target_market)\n\n target_market.to_csv(save_path+'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)\n\n return target_market, TM_SIZE",
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def get_all_tweets(screen_name,keys=keys,filter=True):\n\t\n\tconsumer_key,consumer_secret,access_key,access_secret = keys\n\n\t#re\n\trt = r'^RT'\n\tlink = r'https?:\\/\\/([\\w\\.-]+)\\/([\\w\\.-]+)'\n\tmention = r'^\\@'\n\n\t#authorize twitter, initialize tweepy\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_key, access_secret)\n\tapi = tweepy.API(auth)\n\t\n\t#initialize a list to hold all the tweepy Tweets\n\talltweets = []\t\n\t\n\t#make initial request for most recent tweets (200 is the maximum allowed count)\n\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,tweet_mode='extended')\n\t\n\t#save most recent tweets\n\talltweets.extend(new_tweets)\n\t\n\t#save the id of the oldest tweet less one\n\toldest = alltweets[-1].id - 1\n\t\n\t#keep grabbing tweets until there are no tweets left to grab\n\twhile len(new_tweets) > 0:\n\t\tprint(\"getting tweets before {}\".format(oldest))\n\t\t\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\n\t\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest,tweet_mode='extended')\n\t\t\n\t\t#save most recent tweets\n\t\talltweets.extend(new_tweets)\n\t\t\n\t\t#update the id of the oldest tweet less one\n\t\toldest = alltweets[-1].id - 1\n\t\t\n\t\tprint(\"...{} tweets downloaded so far\".format(len(alltweets)))\n\t\n\t#transform the tweepy tweets into a 2D array that will populate the csv\t\n\tif filter: \n\t\touttweets = [tweet.full_text for tweet in alltweets if not re.match(rt, tweet.full_text) and not re.match(mention, tweet.full_text)]\n\t\tpreproc = [re.sub(link, \"\", tweet)+\"\\n\" for tweet in outtweets][::-1]\n\telse: \n\t\touttweets = [tweet.full_text for tweet in alltweets]\n\t\n\t#write the csv\t\n\twith open('tweets/{}_tweets.txt'.format(screen_name), 'w', encoding='utf-8') as f:\n\t\tf.writelines(preproc)\n\t\tprint('tweets/{}_tweets.txt was successfully created.'.format(screen_name))\n\tpass",
"def get_tweets(self):\n\t\treturn self.tweets",
"def reply():\n # Get all (available) status texts by Int_SORSE after last seen tweet id\n id = read_last_seen()\n new_tweets = []\n new_statuses = Cursor(api.user_timeline, id=RETWEET_USER, since_id=id).items()\n\n # Add all new statuses since the last seen to list\n for status in new_statuses:\n new_tweets.append(status.id)\n\n # If there were any new tweets, retweet them\n if len(new_tweets) > 0:\n # Write last status\n write_last_seen(new_tweets[0])\n\n for id in reversed(new_tweets):\n print('Replying to tweet with ID ' + str(id))\n # Favourite this tweet\n api.create_favorite(id)\n # Retweet\n api.retweet(id)",
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def get_general_feed(user):\n following_unames = list(map(lambda u: u.username, get_following(user)))\n return sorted_reverse_id_order(list(filter(lambda post: (post.author_username in following_unames) or (post.author_username == user.username), list(UserPost.query.all()))))",
"def twitter_display(twitters):\n separator = \"\\t\"\n for status in reversed(twitters):\n nick = unicode(status.user.screen_name)\n nick_color = get_nick_color(nick)\n\n\n text = unicode(status.text)\n timestamp = int(dt2lt(status.created_at))\n print_line( \"%s%s%s%s\" %(nick_color, nick, separator, text), timestamp)",
"def getNewsFeed(self, userId):\n lst = self.followees[userId]\n lst.add(userId)\n allTweets=[]\n for fellow in lst:\n for x in self.tweets[fellow]:\n allTweets.append(x)\n allTweets.sort(key=lambda x:x[1],reverse=True)\n # print(allTweets)\n return [x[0] for x in allTweets[:10]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Marks a tweet as retweeted by the current user. This moves the tweet to the top of the global feed for all users, and to the top of a user's personal feed if it was posted or retweeted by someone they follow. Jinja code in the HTML template guarantees retweets can only occur when a user is logged in.
|
def retweet():
tw_id = request.args.get("tweet")
tws = session["tweets"]
tws[tw_id]["retweet_time"] = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
tws[tw_id]["retweeter"] = session["user"]
session["tweets"] = tws
save_tweets()
return redirect("/personal_feed")
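The handler above only stamps retweet_time and retweeter onto the stored tweet; the reordering described in the query happens when a feed is rendered. Below is a minimal sketch, not taken from the original app, of how a feed view could sort on that stamp; sort_key, the fallback "time" key, and the default epoch string are assumptions that simply mirror the timestamp format written by retweet().

from datetime import datetime

TIME_FMT = "%m/%d/%Y %H:%M:%S"  # same format retweet() writes

def sort_key(tweet):
    # Prefer the retweet stamp; fall back to an assumed original "time" key.
    stamp = tweet.get("retweet_time") or tweet.get("time", "01/01/1970 00:00:00")
    return datetime.strptime(stamp, TIME_FMT)

def newest_first(tweets):
    # tweets is the dict stored in session["tweets"]; newest activity first.
    return sorted(tweets.values(), key=sort_key, reverse=True)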
|
[
"def reply():\n # Get all (available) status texts by Int_SORSE after last seen tweet id\n id = read_last_seen()\n new_tweets = []\n new_statuses = Cursor(api.user_timeline, id=RETWEET_USER, since_id=id).items()\n\n # Add all new statuses since the last seen to list\n for status in new_statuses:\n new_tweets.append(status.id)\n\n # If there were any new tweets, retweet them\n if len(new_tweets) > 0:\n # Write last status\n write_last_seen(new_tweets[0])\n\n for id in reversed(new_tweets):\n print('Replying to tweet with ID ' + str(id))\n # Favourite this tweet\n api.create_favorite(id)\n # Retweet\n api.retweet(id)",
"def retweet(self, tweet_id):\n if not self.get_tweet(tweet_id).retweeted:\n self.api.retweet(tweet_id)",
"def ft_retweet(url):\r\n\ttry:\r\n\t\tids = url.split(\"/\")\r\n\t\ttwitter.retweet(id=ids[-1])\r\n\t\tsleep(1)\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\texcept TwythonError as e:\r\n\t\tprint(e)",
"def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",axis=1)\n\t\twrite_to_file(self.file_path,self.retweet_df,self.screen_name)\n\t\tprint(\"--- done for {} ---\".format(screen_name))",
"def analyze_tweet_retweet(self, data):\n\n\t\tlog.info(f\"Analyzing possibility to like retweet with id: <{data['tweet_id']}>\")\n\n\t\theuristic_value = 0\n\t\t# Verify if there's a relation between the bot and the user\n\t\theuristic_value += self._score_for_relation(data)\n\n\t\t# Next we check if the bot and the user have some policies in common\n\t\theuristic_value += self._score_for_policies(data)\n\n\t\t# We then check if the bot has liked the tweet\n\t\tbot_logs = self.postgres.search_logs({\n\t\t\t\"bot_id\": data[\"bot_id\"],\n\t\t\t\"action\": log_actions.TWEET_LIKE,\n\t\t\t\"target_id\": data[\"tweet_id\"]\n\t\t}, limit=1)\n\t\tif bot_logs[\"success\"] and len(bot_logs[\"data\"]) > 0:\n\t\t\tlog.info(\"Bot already liked the tweet\")\n\t\t\theuristic_value = heuristic_value + BOT_LIKED_TWEET if heuristic_value < 0.7 else 1\n\n\t\t# Finally check if the bot already retweeted something from the user too recently\n\t\tbot_logs = self.postgres.search_logs({\n\t\t\t\"bot_id\": data[\"bot_id\"],\n\t\t\t\"action\": log_actions.RETWEET,\n\t\t\t\"timestamp\": datetime.now() - timedelta(seconds=PENALTY_RETWEETED_USER_RECENTLY_INTERVAL)\n\t\t}, limit=LIMIT_LOGS)\n\t\tif bot_logs['success']:\n\t\t\tbot_logs_dict = {}\n\t\t\tfor bot_log in bot_logs['data']:\n\t\t\t\tbot_logs_dict[str(bot_log['target_id'])] = {'timestamp': bot_log[\"timestamp\"]}\n\n\t\t\tif len(bot_logs_dict) > 0:\n\t\t\t\tusers_of_retweets = self.mongo.search(\n\t\t\t\t\tcollection=\"tweets\",\n\t\t\t\t\tquery={\"$or\": [{\"id_str\": target_id} for target_id in bot_logs_dict.keys()]},\n\t\t\t\t\tfields=[\"user\"],\n\t\t\t\t\tsingle=False\n\t\t\t\t)\n\n\t\t\t\tif users_of_retweets:\n\t\t\t\t\tfor user_of_retweet in users_of_retweets:\n\t\t\t\t\t\tid_str = user_of_retweet['user']['id_str']\n\t\t\t\t\t\tif id_str == str(data[\"user_id\"]) and id_str in bot_logs_dict:\n\t\t\t\t\t\t\tlog.info(f\"Found a past retweet to the user with id <{data['user_id']}>: {user_of_retweet}\")\n\t\t\t\t\t\t\tlog.debug(\"Bot has recently retweet the user\")\n\t\t\t\t\t\t\theuristic_value += PENALTY_RETWEETED_USER_RECENTLY\n\n\t\tlog.info(f\"Request to retweet to tweet <{data['tweet_id']}> with heuristic value of <{heuristic_value}>\")\n\t\treturn heuristic_value",
"def ft_auto_retweet(ph, count=10, result_type=\"recent\"):\r\n\tsearch = ft_search_tweet(ph, count, result_type)\r\n\ttry:\r\n\t\tfor tweet in search[\"statuses\"]:\r\n\t\t\tft_retweet(tweet[\"id_str\"])\r\n\texcept Exception as e:\r\n\t\tprint(e)",
"def follow_rtrs(tweet_id, api):",
"def on_mention(self, tweet, prefix):\n # Is the author of the tweet in the authorized users' list?\n if tweet.author.screen_name.lower() in self.config['authorized_accounts']:\n # Retweet the status.\n self.api.retweet(tweet.id)\n logging.info(\"Retweeted status %s\" % self._tweet_url(tweet))",
"def scrape_tweet(tweet):\n\n\n dateUntil = tweet.created_at + timedelta(1)\n tweetCriteria = got.manager.TweetCriteria().setUsername(tweet.author.screen_name).setSince(\n tweet.created_at.strftime(\"%Y-%m-%d\")).setUntil(dateUntil.strftime(\"%Y-%m-%d\")).setMaxTweets(-1)\n found = False\n tweets = got.manager.TweetManager.getTweets(tweetCriteria)\n for tw in tweets:\n if tw.id == tweet.id_str:\n tweet.reply_count = tw.replies\n break;\n return tweet",
"def combine_retweet_users(twitter,exemplar,count):\n global followers_dict\n tweets = get_user_tweets(twitter,exemplar[0],count,False)\n print(\"Get tweets \",tweets[0])\n for tweet in tweets:\n create_users_dict_from_rt(twitter,exemplar,tweet)\n print(\"finish retweet users\")",
"def _setRetweetCount(self):\n retweetCount = 0\n if \"retweeted_status\" in self._tweet:\n retweetCount = self._tweet[\"retweeted_status\"][\"retweet_count\"]\n return retweetCount",
"def retweet_logic(self, tweet, tweet_id, screen_name, user_id):\n # use SUTime to parse a datetime out of tweet\n time_room = self.parse_time_room(tweet)\n\n # make sure both time and room extracted and only one val each\n val_check = self.value_check(time_room)\n\n if val_check == (1, 1):\n room = time_room[\"room\"][0]\n date_mention = tweet_utils.check_date_mention(tweet)\n converted_time = time_utils.convert_to_utc(time_room[\"date\"][0],\n date_mention)\n\n # check for a time and room conflict, only 1 set of retweets per event\n # default time range that a room is resrved for is -15 +30 mins\n conflict = db_utils.check_time_room_conflict(converted_time, room)\n\n if not conflict:\n event_obj = db_utils.create_event(description=tweet,\n start=converted_time,\n location=room,\n creator=screen_name)\n\n tweet_utils.schedule_tweets(screen_name, tweet, tweet_id, converted_time, event_obj)\n\n # slack_msg = \"{} From: {}, id: {}\".format(tweet, screen_name, user_id)\n # self.send_slack_message('#outgoing_tweets', slack_message)\n\n send_slack_message(user_id=user_id,\n tweet_id=tweet_id,\n screen_name=screen_name,\n tweet_created=True,\n tweet=tweet,\n slack_msg=tweet)\n\n self.send_mention_tweet(screen_name, room, converted_time)\n\n else:\n message = f\"Tweet found for an already scheduled event: {tweet}\"\n send_slack_message(user_id=user_id,\n tweet_id=tweet_id,\n screen_name=screen_name,\n tweet_created=False,\n tweet=tweet,\n slack_msg=message,\n channel=\"conflict\")\n\n elif val_check == (0, 0):\n # tweet found but without valid time or room extracted, ignore\n pass\n\n else:\n # tweet with relevant information but not exactly 1 time & 1 room\n slack_msg = \"\"\"Tweet found that needs review: {} tweet_id: {} screen_name: {}, user_id: {}\"\"\"\n slack_msg = slack_msg.format(tweet, tweet_id, screen_name, user_id)\n # self.send_slack_message(\"#need_review\", message)\n\n send_slack_message(user_id=user_id,\n tweet_id=tweet_id,\n screen_name=screen_name,\n tweet_created=False,\n tweet=tweet,\n slack_msg=slack_msg)",
"def fortify_tm_with_previous_posts(handles, api, max_tweets=100, save_path=''):\n\n engagements = []\n users = []\n for handle in handles:\n try:\n tweets, user = api.get_user_tweets(username=handle, max_number=max_tweets)\n print(user['screen_name'])\n users += [user]\n at_mentions = []\n reply_to = []\n retweets = []\n for tweet in tweets:\n try:\n user_mention_blocks = tweet['entities']['user_mentions']\n for block in user_mention_blocks:\n at_mentions += [block['id']]\n except Exception as e:\n pass\n try:\n if tweet['in_reply_to_user_id']:\n reply_to += [tweet['in_reply_to_user_id']]\n except Exception as e:\n pass\n try:\n retweets += [tweet['retweeted_status']['user']['id']]\n except Exception as e:\n pass\n engagements.append(at_mentions + reply_to + retweets)\n except Exception as e:\n print(e)\n\n\n target_market_arr = []\n for user in users:\n target_market_arr += [api.parse_user_to_twitter_user(user)]\n\n target_market = create_twitter_user_df(target_market_arr)\n target_market['Engagements in Past 100 Tweets'] = engagements\n\n target_market = target_market[target_market['Engagements in Past 100 Tweets'].astype(str) != '[]']\n\n TM_SIZE = len(target_market)\n\n target_market.to_csv(save_path+'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False)\n\n return target_market, TM_SIZE",
"def likeRecentTweets():\n last_seen_id = retrieveLastSeenId(FILE_NAME)\n recent_tweets = api.home_timeline(last_seen_id, tweet_mode='extended')\n for tweet in reversed(recent_tweets):\n last_seen_id = tweet.id\n storeLastSeenId(last_seen_id, FILE_NAME)\n\n # Functionality\n if not tweet.favorited:\n tweet.favorite()",
"def _post_action(self, user):\n if user:\n d = {'type':TwitterOAuth.TYPE,\n 'nickname':user.get('username', ''),\n 'email':'',\n 'userid':user.get('id', ''),\n 'realname':user.get('name', ''),\n 'icon_url':user.get('profile_image_url', ''),\n }\n token = user.get('access_token', '')\n if token:\n if token.get('secret', '') and token.get('key', ''):\n d['access_secret'] = token.get('secret', '')\n d['access_key'] = token.get('key', '')\n memcache.set(self.cookies.get(OAUTH_ACCESS_TOKEN_COOKIE),\n d, namespace = TWITTER_NAMESPACE, time = EXPIRE)\n rurl = self.session.get('referer', '')\n if rurl:\n # clear 'referer' key in session object.\n del self.session['referer']\n self.session.put()\n self.redirect(rurl)\n else:\n self.redirect('/')\n\n self.render('blank')",
"def capture_tweets_for_posterity():\n their_tweets = TWITTER.user_timeline(\n ORIGINAL_TWEETER,\n count=BATCH_SIZE,\n since_id=LATEST_CAPTURED_TWEET)\n their_tweets.reverse() # i.e. put in chronological order\n for their_tweet in their_tweets:\n try:\n local_screenshot = get_tweet_screenshot(their_tweet)\n my_tweet_text = build_intro(their_tweet.user.screen_name)\n send_my_tweet(my_tweet_text, local_screenshot)\n check_off(their_tweet)\n finally:\n os.remove(local_screenshot)",
"def global_feed():\n if \"user\" in session:\n return render_template(\"global_feed_template.html\",\n tweets=Tweets(session[\"tweets\"]),\n user=session[\"user\"],\n users=json.load(open(\"users.json\")),\n title=\"Global Feed\")\n else:\n return render_template(\"global_feed_template.html\",\n tweets=Tweets(session[\"tweets\"]),\n user=\"\",\n title=\"Global Feed\")",
"def getRT(statuses):\n\n retweets_dup = [\n (status['retweet_count'], \n status['retweeted_status']['user']['screen_name'], \n status['text'])\n for status in statuses \n if status.has_key('retweeted_status')]\n return list(set(retweets_dup))",
"def delete_tweet():\n tw_id = request.args.get(\"tweet\")\n global_feed = request.args.get(\"global\")\n\n tws = session[\"tweets\"]\n tws.pop(tw_id)\n session[\"tweets\"] = tws\n save_tweets()\n\n if global_feed == \"True\":\n return redirect(\"/global_feed\")\n else:\n return redirect(\"/personal_feed\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Registers a user in users.json; that user can then log in via /login.
|
def register():
if request.method == 'GET':
return render_template("register_template.html",
title="Register")
un = request.form["username"]
pw = request.form["password"]
users = json.load(open("users.json"))
if not un:
flash("Please provide a non-empty username.")
return redirect("/register")
if not pw:
flash("Please provide a non-empty password.")
return redirect("/register")
if any(c.isspace() for c in un):
flash("Please provide a username without whitespace.")
return redirect("/register")
if any(c.isspace() for c in pw):
flash("Please provide a password without whitespace.")
return redirect("/register")
if un in users:
flash("User already registered.")
return redirect("/register")
users[un] = {"pw": pw,
"following": [un],
"followers": [un]}
with open("users.json", "w") as outfile:
json.dump(users, outfile)
return redirect("/login")
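register() stores the password as plain text in users.json. A hardened variant would store a salted hash instead; the sketch below assumes Werkzeug (which ships with Flask) is available and uses illustrative helper names, it is not code from the original app.

from werkzeug.security import generate_password_hash, check_password_hash

def store_user(users, username, password):
    # Same record shape as register(), but with a hashed password.
    users[username] = {"pw": generate_password_hash(password),
                       "following": [username],
                       "followers": [username]}
    return users

def password_matches(users, username, password):
    # What a /login handler would call instead of comparing raw strings.
    return username in users and check_password_hash(users[username]["pw"], password)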
|
[
"def register(ctx, username, password):\n url = ctx.obj['URLS'].register_user()\n headers = ctx.obj['HEADERS']\n data = {\n 'username': username,\n 'password': password,\n 'verify': False\n }\n try:\n r = requests.post(url, json=data, headers=headers)\n r.raise_for_status()\n body = r.json()\n if ctx.obj['RAW']:\n click.echo(json.dumps(body, indent=4))\n else:\n user_id = body['id']\n user_name = body['username']\n click.echo('Registered {} with ID {}.'.format(user_name, user_id))\n except (requests.ConnectionError, requests.HTTPError) as ex:\n click.echo('{}'.format(ex))",
"def register_user(data):\n if USERINFO_DB is None:\n # failed to get db connection\n return False\n\n users = USERINFO_DB['users']\n data = json.loads(data)\n # TODO: validate json\n # TODO: validate user (duplicates?)\n users.insert_one(data)\n\n return True",
"def register():\r\n\tdata = request.json\r\n\ttry:\r\n\t\tusername = data['username']\r\n\t\tpassword = data['password']\r\n\t\tuser = Users.query.filter(Users.name == username).first()\r\n\t\tif user:\r\n\t\t\tif check_password_hash(user.password, password):\r\n\t\t\t\treturn {\"x-access-token\" : user.generate_jwt_token()}, 200\r\n\t\t\telse:\r\n\t\t\t\traise AttributeError(\"Incorrect password\")\r\n\t\telse:\r\n\t\t\tcurrent_app.logger.info(f\"Creating new user {username}...\")\r\n\t\t\taccess_token = Users.add_user(name=username, password=password)\r\n\r\n\texcept (KeyError,TypeError) as e:\r\n\t\treturn {\"error\" : f\"Invalid input data. {e}. Please provide username and password\"}\r\n\texcept AttributeError as e:\r\n\t\treturn {\"error\":\"Invalid Login {}\".format(e)}\r\n\telse:\r\n\t\treturn {\"x-access-token\" : access_token}, 201",
"def create_user():\n user_record = request.get_json(force=True)\n\n add_user_to_db(user_record)\n\n return \"Successfully added user.\", 200",
"def register_user(self, user_info) -> Dict:\n raise NotImplementedError",
"def register_user(url, payload):\n resp = requests.post(url, data=payload)\n resp_obj = {\n 'resp_obj': resp,\n 'resp_data': resp.json()\n }\n return resp_obj",
"def create_user(self):\n self.users.update({\n self.user_id: {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n 'password2': self.password2\n }\n })\n\n return self.users",
"def test_endpoint_creates_user(self):\n new_user = {\n \"username\": \"maina\",\n \"password\": \"password123\"\n }\n response = self.client.post('/api/v1/auth/register', data=new_user)\n # status CREATED\n self.assertEqual(response.status_code, 201)",
"def register():\n username = request.form['username']\n address = request.form['server_address']\n\n if not servers.exists(address=address):\n return bad_json_response('Server is not registered.')\n\n server_id = servers.export_one('id', address=address)\n\n if ping(address):\n if not users.exists(username=username):\n users.insert(username=username, server_id=server_id)\n else:\n return bad_json_response(\n 'Username is already taken. Try again :).')\n else:\n return bad_json_response(\n 'This data server is not available. '\n 'Please contact the server owner.'\n )\n\n return good_json_response('success')",
"def add_user_and_password(cls, username, password):\n\n users = cls.users\n users[username] = hashpw(password.encode('utf-8'),\n gensalt()).decode('utf-8')\n try:\n with open(cls.cwd + \"/users.json\", \"w\") as outfile:\n json.dump(users, outfile, sort_keys=True, indent=4)\n except:\n logger.info('Unable to write new user file.')\n cls.users = users",
"def add_user(self, user: User) -> None:\n\t\tpass",
"def add_user():\n\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n\n new_user = User(email=username,\n password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n flash('Logged in')\n return redirect(\"/\")",
"def register_user(self, user_id):\n options = UserOptions((user_id, None, None))\n self.save_item(options)\n profile = UserProfile((user_id, None, 0, None))\n self.save_item(profile)",
"def initialize_user():\n flask.g.user = readit.User(flask.session.get('session_key', None))\n flask.g.user.user_id = flask.session.get('user_id', None)",
"def register():\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(name=form.name.data,\n department=form.department.data,\n position=form.position.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(user)\n db.session.commit()\n if User.query.count() == 1:\n \tseed_static_data(user)\n flash('You have successfully registered! You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')",
"def register():\n email = request.form.get('email')\n password = request.form.get('password')\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n\n user = User(email=email, password=password, firstname=firstname,\n lastname=lastname)\n form = RegisterForm(request.form, user)\n\n if form.validate_on_submit():\n # Account creation is succesful unless the following function raises\n # an exception. To stay on the safe side, we assert _err == 0.\n res = ordrin_api.create_account(email, password, firstname,\n lastname)\n assert not res['_err']\n # TODO: Refactor password hashing. The ordr.in python library should\n # probably be refactored so it can accept already hashed passwords.\n user.password = sha256(password).hexdigest()\n user.save()\n login(user)\n return JSONResponse(user)\n else:\n return JSONResponse({'errors': form.errors})",
"def add_local_user() -> None:\n from getpass import getpass\n\n login = input(\"Username (login): \").strip()\n password = getpass()\n if not all([login, password]):\n print(\"Both username and password are required.\")\n return\n app = make_app() # type: ignore\n with app.app_context():\n user = User(name=login, password=password)\n DB.session.add(user)\n DB.session.commit()",
"def post(self):\n test_user = api.payload\n if 'username' in test_user:\n username = test_user['username']\n if 'password' in test_user:\n password = test_user['password']\n res = User.login(username=username, password=password, registered_users=registered_users)\n return res",
"def signup():\n\n global active_user\n req = request.get_json(force=True, silent=True)\n username = req.get('username')\n password = req.get('password')\n # print(user, password)\n try:\n user = User.get(user_id=username)\n if not user:\n print('i was here')\n # create_user(userid = user, password = password)\n User(user_id=username, password=password)\n active_user = username\n return \"SUCESSS, Your ID is created\"\n else:\n return \"FALIURE, Your ID was already taken\"\n except Exception as e:\n return str(e)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets up links to module_directory/base_binary under the given link_names in $PATH
|
def make_links(module_dir, base_binary, link_names):
    # Only create links when the built binary actually exists.
    if os.path.isfile(module_dir + "/" + base_binary):
        for alias in link_names:
            # Remove any stale link first; tmpdir is a module-level temp
            # directory whose bin/ subdirectory is expected to be on $PATH.
            try:
                os.unlink(tmpdir + "/bin/" + alias)
            except OSError:
                pass
            os.symlink(os.getcwd() + "/" + module_dir + "/" + base_binary,
                       tmpdir + "/bin/" + alias)
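A usage sketch: the function relies on that module-level tmpdir, with tmpdir/bin placed on $PATH. The module path, binary name, and aliases below are made up for illustration.

import os
import tempfile

tmpdir = tempfile.mkdtemp()
os.makedirs(tmpdir + "/bin", exist_ok=True)
os.environ["PATH"] = tmpdir + "/bin" + os.pathsep + os.environ["PATH"]

# Links tmpdir/bin/gz and tmpdir/bin/gzip-like to tools/compress/gzip_like,
# provided that file exists relative to the current working directory.
make_links("tools/compress", "gzip_like", ["gz", "gzip-like"])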
|
[
"def link(paths):\n with LogSection(\"Setting up symlinks...\"):\n dotfile_dir = os.path.dirname(os.path.realpath(__file__))\n for src, dst in sorted(paths.items(), key=lambda item: item[0]):\n src = expand(os.path.join(dotfile_dir, src))\n dst = expand(dst)\n if os.path.realpath(src) == os.path.realpath(dst):\n # Skip correct symlinks\n continue\n elif os.path.islink(dst):\n # Remove incorrect symlinks\n os.remove(dst)\n elif os.path.exists(dst):\n # Backup existing files\n backup(dst)\n else:\n # Create subdirectories\n parent = os.path.dirname(dst)\n if not os.path.exists(parent):\n os.makedirs(os.path.dirname(dst))\n print(\"{} -> {}\".format(src, dst))\n os.symlink(src, dst)",
"def link_files(links, force_link=False):\n for link in links:\n (link_name, source), = link.items()\n\n link_name = os.path.expanduser(link_name)\n\n symlink(os.path.join(os.getcwd(), source), link_name, force_link)",
"def setupUserDirSymlinks(userDirs):\r\n for userDir in userDirs:\r\n source = os.path.join(\"/userdirs/\",userDir)\r\n destination = os.path.join(hostSubuserHome,userDir)\r\n if not os.path.islink(destination):\r\n os.symlink(source,destination)",
"def _gen_bin_link(self):\n real_bin_path = os.path.join(common_util.get_android_root_dir(),\n common_util.get_android_out_dir(),\n constant.IDE_ECLIPSE,\n self.module_name)\n if not os.path.exists(real_bin_path):\n os.makedirs(real_bin_path)\n return {self._PROJECT_LINK.format(self._OUTPUT_BIN_SYMBOLIC_NAME,\n real_bin_path)}",
"def rm_mod_links(modlinkdirs=modlinkdirs):\n\n print(\"Removing links to compiled mods dir.\")\n \n for modlinkdir in modlinkdirs:\n path1 = os.path.join(modlinkdir, \"x86_64\")\n path2 = os.path.join(modlinkdir, \"i386\")\n if os.path.islink(path1):\n if (os.path.realpath(path1) != path1):\n print(\" Removing directory: \" + path1)\n os.remove(path1)\n else:\n print(\" Removing directory: \" + path1)\n shutil.rmtree(path1, ignore_errors=True)\n if os.path.islink(path2):\n if (os.path.realpath(path1) != path2):\n print(\" Removing directory: \" + path2)\n os.remove(path2)\n else:\n print(\" Removing directory: \" + path2)\n shutil.rmtree(path2, ignore_errors=True)",
"def make_symlinks(arguments):\n\n ignored = {\".config\", \".local\", \".git\", \".gitignore\", \".gitmodules\"}\n\n for dotfile in glob(os.path.join(arguments.dotfiles, \".*\")):\n if os.path.basename(dotfile) in ignored:\n continue\n\n dest = os.path.join(arguments.dest_dir, os.path.basename(dotfile))\n symlink_unless_present(dotfile, dest)\n\n for each in [\".config\", \".local\", \".local/share\"]:\n dotfile_dir = os.path.join(arguments.dotfiles, each)\n dest_dir = os.path.join(arguments.dest_dir, each)\n try:\n os.symlink(dotfile_dir, dest_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n # Fall back on linking individual files if we failed.\n if os.path.realpath(dest_dir) != dotfile_dir:\n for dotfile in os.listdir(dotfile_dir):\n dotfile = os.path.join(dotfile_dir, dotfile)\n dest = os.path.join(dest_dir, os.path.basename(dotfile))\n symlink_unless_present(dotfile, dest)\n\n bin_dir = os.path.abspath(os.path.normpath(arguments.bin_dir))\n try:\n os.makedirs(bin_dir)\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise\n\n for dotfile in glob(os.path.join(arguments.dotfiles, \"bin\", \".*\")):\n dest = os.path.join(bin_dir, os.path.basename(dotfile))\n symlink_unless_present(dotfile, dest)",
"def _add_rpaths(env, install_off, set_cgo_ld, is_bin):\n if GetOption('no_rpath'):\n if set_cgo_ld:\n env.AppendENVPath(\"CGO_LDFLAGS\", env.subst(\"$_LIBDIRFLAGS \"), sep=\" \")\n return\n env.AppendUnique(RPATH_FULL=['$PREFIX/lib64'])\n rpaths = env.subst(\"$RPATH_FULL\").split()\n prefix = env.get(\"PREFIX\")\n if not is_bin:\n path = r'\\$$ORIGIN'\n env.AppendUnique(RPATH=[DaosLiteral(path)])\n for rpath in rpaths:\n if rpath.startswith('/usr'):\n env.AppendUnique(RPATH=[rpath])\n continue\n if install_off is None:\n env.AppendUnique(RPATH=[os.path.join(prefix, rpath)])\n continue\n relpath = os.path.relpath(rpath, prefix)\n if relpath != rpath:\n if set_cgo_ld:\n env.AppendENVPath(\"CGO_LDFLAGS\", f'-Wl,-rpath=$ORIGIN/{install_off}/{relpath}',\n sep=\" \")\n else:\n joined = os.path.normpath(os.path.join(install_off, relpath))\n env.AppendUnique(RPATH=[DaosLiteral(fr'\\$$ORIGIN/{joined}')])\n for rpath in rpaths:\n path = os.path.join(prefix, rpath)\n if is_bin:\n # NB: Also use full path so intermediate linking works\n env.AppendUnique(LINKFLAGS=[f'-Wl,-rpath-link={path}'])\n else:\n # NB: Also use full path so intermediate linking works\n env.AppendUnique(RPATH=[path])\n\n if set_cgo_ld:\n env.AppendENVPath(\"CGO_LDFLAGS\", env.subst(\"$_LIBDIRFLAGS $_RPATH\"), sep=\" \")",
"def register_libraries():\n\n global _binaries_dir\n\n for binary in os.listdir(_binaries_dir):\n if binary.endswith(('.zip', '.egg', '.whl')):\n sys.path.insert(0, os.path.join(_binaries_dir, binary))",
"def linkHals(hubDir, hals):\n relativePaths = []\n for i, hal in enumerate(hals):\n uniqueName = 'input_%d' % i + \".hal\"\n system(\"ln -sf %s %s\" % (hal, os.path.join(hubDir, uniqueName)))\n relativePaths.append('../' + uniqueName)\n return relativePaths",
"def _add_links(self, cmdline):\n # need to add the current file to the DB so that we have the filefilelink and filecodelink info\n current_file = os.path.join(self.dbu.getIncomingPath(), self.filename)\n df = self.pq.figureProduct(current_file) # uses all the inspectors to see what product a file is\n if df is None:\n DBlogging.dblogger.error(\"{0} did not have a product\".format(current_file))\n self.moveToError(current_file)\n return\n df.params['verbose_provenance'] = ' '.join(cmdline)\n f_id = self.pq.diskfileToDB(df)\n ## here the file is in the DB so we can add the filefilelink an filecodelinks\n if f_id is not None: # None comes back if the file goes to error\n self.dbu.addFilecodelink(f_id, self.code_id)\n for val in self.input_files: # add a link for each input file\n self.dbu.addFilefilelink(f_id, val)",
"def setup_paths(binaryDir, appDir):\r\n\r\n paths = [\r\n binaryDir,\r\n os.path.join(binaryDir, 'lib'),\r\n os.path.join(binaryDir, 'scripts'),\r\n ] \r\n sys.path.extend([os.path.normpath(p) for p in paths])\r\n\r\n find_eggs(paths[0])\r\n find_eggs(appDir)",
"def generateSymbolicLink(path, output_dir, first_replacement=\"uploaded\",\n second_replacement=\"home/mediapanel\",\n lua_folder=\"themes\"):\n split_path = path.split(\"/\")\n replaced_index = split_path.index(first_replacement)\n replacement_dir = os.path.join(second_replacement, lua_folder, output_dir)\n split_path[replaced_index] = replacement_dir\n os.symlink(path, os.path.join(*split_path))",
"def link_files(self):\n\n for package in self.packages:\n package.link_files()\n\n for _file in self.files:\n if _file.create_link:\n _file.link()",
"def create_install_and_links():\n global PACKAGE\n install = \"\"\n install_dir = PACKAGE.split('fonts-')[1]\n\n for dirpath, dirnames, filenames in os.walk('..'):\n if dirnames != 'debian':\n for filename in filenames:\n if filename.endswith('.ttf'):\n install += \"./{} usr/share/fonts/truetype/{}/\\n\".format(\n filename, install_dir)\n elif filename.endswith('.otf'):\n install += \"./{} usr/share/fonts/truetype/{}/\\n\".format(\n filename, install_dir)\n elif filename.endswith('.sfd'):\n if 'generate.pe' in filenames:\n check_generatepe(os.path.join(dirpath,\n filenames[filenames.index(\n 'generate.pe')]))\n if GENERATES_TTF:\n install += \"./{} usr/share/fonts/truetype/{}/\\n\".\\\n format(filename.replace('sfd', 'ttf'),\n install_dir)\n elif GENERATES_OTF:\n install += \"./{} usr/share/fonts/opentype/{}/\\n\".\\\n format(filename.replace('sfd', 'otf'),\n install_dir)\n else:\n print(\"\\n*Unable to determine if source generates\"\\\n \"TTF or OTF file.\\n\\Please manually edit\"\\\n \"the debian/install file*\")\n else:\n print(\"\\n*Unable to determine if source generates\"\\\n \"TTF or OTF file.\\nPlease manually edit\"\\\n \"the debian/install file*\")\n\n elif filename.endswith('.conf'):\n install += \"./{} etc/fonts/conf.avail\".format(filename)\n print(\"\\nFound a fontconfig configuration file.\"\\\n \"Added it to debian/install\")\n with open('links', 'w') as fd:\n fd.write('etc/fonts/conf.avail/'+filename +\n ' etc/fonts/conf.d/'+filename)\n print(\"\\nI've symlinked conf file in etc/fonts/conf.d\"\\\n \".\\nPlease update fontconfig priority\"\\\n \"appropriately\")\n\n with open('install', 'w') as fd:\n fd.write(install)",
"def generate(env):\n gnulink.generate(env)\n\n env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,-no-undefined')\n\n env['SHLINKCOM'] = shlib_action\n env['LDMODULECOM'] = ldmod_action\n env.Append(SHLIBEMITTER = [shlib_emitter])\n env.Append(LDMODULEEMITTER = [ldmod_emitter])\n\n env['SHLIBPREFIX'] = 'cyg'\n env['SHLIBSUFFIX'] = '.dll'\n\n env['IMPLIBPREFIX'] = 'lib'\n env['IMPLIBSUFFIX'] = '.dll.a'\n\n # Variables used by versioned shared libraries\n env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'\n env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS'\n\n # SHLIBVERSIONFLAGS and LDMODULEVERSIONFLAGS are same as in gnulink...\n\n # LINKCALLBACKS are NOT inherited from gnulink\n env['LINKCALLBACKS'] = {\n 'VersionedShLibSuffix' : _versioned_lib_suffix,\n 'VersionedLdModSuffix' : _versioned_lib_suffix,\n 'VersionedImpLibSuffix' : _versioned_lib_suffix,\n 'VersionedShLibName' : link._versioned_shlib_name,\n 'VersionedLdModName' : link._versioned_ldmod_name,\n 'VersionedShLibImpLibName' : lambda *args: _versioned_implib_name(*args, libtype='ShLib'),\n 'VersionedLdModImpLibName' : lambda *args: _versioned_implib_name(*args, libtype='LdMod'),\n 'VersionedShLibImpLibSymlinks' : lambda *args: _versioned_implib_symlinks(*args, libtype='ShLib'),\n 'VersionedLdModImpLibSymlinks' : lambda *args: _versioned_implib_symlinks(*args, libtype='LdMod'),\n }\n\n # these variables were set by gnulink but are not used in cyglink\n try: del env['_SHLIBSONAME']\n except KeyError: pass\n try: del env['_LDMODULESONAME']\n except KeyError: pass",
"def link(*objects):\n command = [shared.LLVM_LINK] + list(objects)\n with get_temp_file('.bc') as out: ret = subprocess.call(command, stdout=out)\n if ret != 0: raise RuntimeError('Could not link %s.' % objects)\n return out.name",
"def add_reference_scripts(dest: str):\n print(f'Copying reference scripts to {dest}')\n print('\\treduce_5.py')\n su.copyfile('../../reduce_5.py', f'{dest}/reduce_5.py')\n \n print('\\treduce_9.py')\n su.copyfile('../../reduce_9.py', f'{dest}/reduce_9.py')\n \n print('Linking to mir_utils.py')\n os.symlink('../../mir_utils.py', f'{dest}/mir_utils.py')",
"def main(simulation_directory, new_gll_directory):\n all_events_directories = sorted(glob(join(simulation_directory, \"*\")))\n for each_event_directory in all_events_directories:\n relink_single(each_event_directory, new_gll_directory)",
"def update_root_symlinks():\n config = commons.getConfig()\n\n for (section, option, symlink_suffix) in (('qserv', 'log_dir', os.path.join(\"var\", \"log\")),\n ('qserv', 'tmp_dir', 'tmp'),\n ('qserv', 'qserv_data_dir', os.path.join(\"var\", \"lib\"))):\n symlink_target = config[section][option]\n default_dir = os.path.join(config['qserv']['qserv_run_dir'], symlink_suffix)\n\n # symlink if target directory is not set to its default value\n if symlink_target != default_dir:\n if os.path.exists(default_dir):\n if os.path.islink(default_dir):\n os.unlink(default_dir)\n else:\n _LOG.fatal(\n \"Please remove {0} and restart the configuration procedure\".format(default_dir))\n sys.exit(1)\n _symlink(symlink_target, default_dir)\n\n _LOG.info(\"Qserv symlinks creation for externalized directories succeeded\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the attention and alignments for a given attention_mechanism.
|
def _compute_decoder_attention(cell_output, hidden_states, previous_alignments, attention_layer):
print('cell_output', cell_output, 'hidden_states', hidden_states)
hidden_states_stack = tf.stack(hidden_states, axis=1)
print('hidden_states_stack', hidden_states_stack)
    # Bahdanau-style additive scoring, adapted from BahdanauAttention: the
    # "memory" is the stack of the decoder's own previous hidden states and
    # the "query" is the current cell output. The score is
    # reduce_sum(v * tanh(memory + query)) over the unit axis, and a softmax
    # over the time axis turns the scores into alignments.
cell_output_expand = array_ops.expand_dims(cell_output, 1)
num_units = cell_output_expand.shape[2].value or array_ops.shape(cell_output_expand)[2]
print('cell_output', cell_output_expand)
v = tf.get_variable("attention_v_decoder", [num_units], dtype=tf.float32)
print('v', v)
score = math_ops.reduce_sum(v * math_ops.tanh(cell_output_expand + hidden_states_stack), [2])
print('score', score)
alignments = nn_ops.softmax(score)
print('alignments', alignments)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, hidden_states_stack)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
print('attention', attention, 'alignments', alignments)
return attention, alignments
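For reference, here is the same additive-attention arithmetic in plain NumPy, independent of the TensorFlow graph code above, just to make the shapes concrete (the sizes are arbitrary).

import numpy as np

batch, time_steps, units = 2, 5, 8
query = np.random.randn(batch, units)                # cell_output
memory = np.random.randn(batch, time_steps, units)   # stacked previous hidden states
v = np.random.randn(units)                           # plays the role of attention_v_decoder

score = np.sum(v * np.tanh(query[:, None, :] + memory), axis=2)        # [batch, time]
alignments = np.exp(score) / np.exp(score).sum(axis=1, keepdims=True)  # softmax over time
context = np.einsum("bt,btu->bu", alignments, memory)                  # [batch, units]
print(context.shape)  # (2, 8)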
|
[
"def _compute_attention(attention_mechanism, cell_output, attention_state,\n attention_layer):\n alignments, next_attention_state = attention_mechanism(\n cell_output, state=attention_state)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded_alignments = array_ops.expand_dims(alignments, 1)\n # Context is the inner product of alignments and values along the \t# memory time dimension.\n # alignments shape is\t# [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\t# [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\t# [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n context = math_ops.matmul(expanded_alignments, attention_mechanism.values)\n context = array_ops.squeeze(context, [1])\n\n if attention_layer is not None:\n attention = attention_layer(array_ops.concat([cell_output, context], 1))\n else:\n attention = context\n\n return attention, alignments, next_attention_state",
"def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None):\n super(JointAttentionWrapper, self).__init__(name=name)\n if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access\n raise TypeError(\n \"cell must be an RNNCell, saw type: %s\" % type(cell).__name__)\n if isinstance(attention_mechanism, (list, tuple)):\n self._is_multi = True\n attention_mechanisms = attention_mechanism\n for attention_mechanism in attention_mechanisms:\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must contain only instances of \"\n \"AttentionMechanism, saw type: %s\"\n % type(attention_mechanism).__name__)\n else:\n self._is_multi = False\n print('instance', isinstance(attention_mechanism, AttentionMechanism), type(attention_mechanism))\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must be an AttentionMechanism or list of \"\n \"multiple AttentionMechanism instances, saw type: %s\"\n % type(attention_mechanism).__name__)\n attention_mechanisms = (attention_mechanism,)\n \n if cell_input_fn is None:\n cell_input_fn = (\n lambda inputs, encoder_attention, decoder_attention: array_ops.concat(\n [inputs, encoder_attention, decoder_attention], -1))\n else:\n if not callable(cell_input_fn):\n raise TypeError(\n \"cell_input_fn must be callable, saw type: %s\"\n % type(cell_input_fn).__name__)\n \n if attention_layer_size is not None:\n attention_layer_sizes = tuple(\n attention_layer_size\n if isinstance(attention_layer_size, (list, tuple))\n else (attention_layer_size,))\n if len(attention_layer_sizes) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer_size must contain exactly one \"\n \"integer per attention_mechanism, saw: %d vs %d\"\n % (len(attention_layer_sizes), len(attention_mechanisms)))\n self._attention_layers = tuple(\n layers_core.Dense(\n attention_layer_size,\n name=\"attention_layer\",\n use_bias=False,\n dtype=attention_mechanisms[i].dtype)\n for i, attention_layer_size in enumerate(attention_layer_sizes))\n self._attention_layer_size = sum(attention_layer_sizes)\n else:\n self._attention_layers = None\n self._attention_layer_size = sum(\n attention_mechanism.values.get_shape()[-1].value\n for attention_mechanism in attention_mechanisms)\n \n self._cell = cell\n self._attention_mechanisms = attention_mechanisms\n self._cell_input_fn = cell_input_fn\n self._output_attention = output_attention\n self._alignment_history = alignment_history\n with ops.name_scope(name, \"AttentionWrapperInit\"):\n if initial_cell_state is None:\n self._initial_cell_state = None\n else:\n final_state_tensor = nest.flatten(initial_cell_state)[-1]\n state_batch_size = (\n final_state_tensor.shape[0].value\n or array_ops.shape(final_state_tensor)[0])\n error_message = (\n \"When constructing JointAttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and initial_cell_state. Are you using \"\n \"the BeamSearchDecoder? 
You may need to tile your initial state \"\n \"via the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(state_batch_size, error_message)):\n self._initial_cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"check_initial_cell_state\"),\n initial_cell_state)",
"def _build_attentional_interface(self):\n print (\"==> Building the attentional interface:\")\n FLAGS = self.FLAGS\n batch_size = FLAGS.batch_size\n hidden_size = FLAGS.hidden_size\n #max_input_length = FLAGS.max_input_length\n loop_until = tf.to_int32(np.array(range(batch_size)))\n \n \n with tf.variable_scope('attention') as attn_scope:\n print (\"\\t attention:\")\n \n # Time-major self._all_outputs (N, M, H) --> (M, N, H)\n all_outputs_time_major = tf.transpose(self._all_outputs, perm=[1,0,2])\n \n # Apply tanh nonlinearity\n fn = lambda _input: tf.nn.tanh(_linear(\n args = _input,\n output_size = hidden_size,\n bias = True,\n bias_start = 0.0,\n nonlinearity = 'tanh',\n scope = attn_scope,\n name = 'attn_nonlinearity',))\n \n z = tf.map_fn(lambda x: fn(x), all_outputs_time_major, dtype=tf.float32)\n \n # Apply softmax weights\n fn = lambda _input: tf.nn.tanh(_linear(\n args = _input,\n output_size =1,\n bias = True,\n bias_start = 0.0,\n nonlinearity = 'tanh',\n scope = attn_scope,\n name = 'attn_softmax',))\n \n z = tf.map_fn(\n lambda x: fn(x), z, dtype= tf.float32)\n \n # Squeeze and convert to batch major\n z = tf.transpose(\n tf.squeeze(\n input = z,\n axis = 2,),\n perm = [1,0])\n \n # Normalize\n self._z = tf.nn.softmax(\n logits = z,)\n\n\n \n # Create context vector (via soft attention.)\n fn = lambda sample_num: tf.reduce_sum(\n tf.multiply(\n self._all_outputs[sample_num][:self._review_lens[sample_num]],\n # (500,) --> (500, 1) --> (500, 200)\n tf.tile(\n input = tf.expand_dims(\n self._z[sample_num][:self._review_lens[sample_num]],1),\n multiples = (1, hidden_size),\n )),\n axis = 0)\n \n self._c = tf.map_fn(\n lambda sample_num: fn(sample_num), loop_until, dtype = tf.float32 )\n print(\"\\t\\t self._Z\", self._z.get_shape())\n print(\"\\t\\t self._c\", self._c.get_shape())\n # attention:\n #\t\t Alpha vector: self._Z (?, 300)\n #\t\t Context Vector: self._c (256, 200) ",
"def _compute_attention(self,\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask=None,\n training=None):\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(self._dot_product_equation, key_tensor,\n query_tensor)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(self._key_dim)))\n\n # Apply linear projection before softmax\n attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,\n self._pre_softmax_weight)\n\n # Normalize the attention scores to probabilities.\n # `attention_scores` = [B, N, T, S]\n attention_scores = self._masked_softmax(attention_scores, attention_mask)\n\n # Apply linear projection after softmax\n attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,\n self._post_softmax_weight)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_scores_dropout = self._dropout_layer(\n attention_scores, training=training)\n\n # `context_layer` = [B, T, N, H]\n attention_output = tf.einsum(self._combine_equation,\n attention_scores_dropout, value_tensor)\n return attention_output, attention_scores",
"def attention_map(model, image):\n size = model.input_shape[1]\n grid_size = int(np.sqrt(model.layers[5].output_shape[0][-2] - 1))\n\n # Prepare the input\n X = vit.preprocess_inputs(cv2.resize(image, (size, size)))[np.newaxis, :] # type: ignore\n\n # Get the attention weights from each transformer.\n outputs = [\n l.output[1] for l in model.layers if isinstance(l, layers.TransformerBlock)\n ]\n weights = np.array(\n tf.keras.models.Model(inputs=model.inputs, outputs=outputs).predict(X)\n )\n num_layers = weights.shape[0]\n num_heads = weights.shape[2]\n reshaped = weights.reshape(\n (num_layers, num_heads, grid_size ** 2 + 1, grid_size ** 2 + 1)\n )\n\n # From Appendix D.6 in the paper ...\n # Average the attention weights across all heads.\n reshaped = reshaped.mean(axis=1)\n\n # From Section 3 in https://arxiv.org/pdf/2005.00928.pdf ...\n # To account for residual connections, we add an identity matrix to the\n # attention matrix and re-normalize the weights.\n reshaped = reshaped + np.eye(reshaped.shape[1])\n reshaped = reshaped / reshaped.sum(axis=(1, 2))[:, np.newaxis, np.newaxis]\n\n # Recursively multiply the weight matrices\n v = reshaped[-1]\n for n in range(1, len(reshaped)):\n v = np.matmul(v, reshaped[-1 - n])\n\n # Attention from the output token to the input space.\n mask = v[0, 1:].reshape(grid_size, grid_size)\n mask = cv2.resize(mask / mask.max(), (image.shape[1], image.shape[0]))[\n ..., np.newaxis\n ]\n return (mask * image).astype(\"uint8\")",
"def attention(\n self, decoder_states: Tensor, encoder_states: Tensor, padding_mask: Tensor = None\n ) -> Tensor:\n # DONE: Implement attention() using the signature, docstring, and comments as guide\n # Return a linear combination of Values per Query, as weighted by attention with the Keys\n # Hint: this can all be accomplished using transpose, matmul, addition, and softmax\n # Hint: add the padding mask before applying softmax\n # Hint: use torch.nn.functional.softmax and pay attention (no pun intended) to the dimension\n # over which it is applied\n\n q, k, v = decoder_states, encoder_states, encoder_states\n # print(q, k, v)\n\n # (batch_size, target_sequence_length, source_sequence_length)\n weights = torch.bmm(q, torch.transpose(k, 1, 2))\n if padding_mask is not None:\n weights = weights + padding_mask\n weights = F.softmax(weights, dim=-1)\n\n # (batch_size, target_sequence_length, source_sequence_length)\n attention = torch.bmm(weights, v)\n return attention",
"def _alignments(self) -> None:\n if not os.path.exists(self.align_path):\n logger.info(\"Training aligner\")\n train_opts = []\n if self.batch_size:\n train_opts.append(f\"--batch_size={self.batch_size}\")\n if self.delta:\n train_opts.append(f\"--delta={self.delta}\")\n if self.fst_default_cache_gc:\n train_opts.append(f\"--fst_default_cache_gc={self.fst_default_cache_gc}\")\n if self.fst_default_cache_gc_limit:\n train_opts.append(\n f\"--fst_default_cache_gc_limit={self.fst_default_cache_gc_limit}\"\n )\n if self.alpha:\n train_opts.append(f\"--alpha={self.alpha}\")\n if self.num_iterations:\n train_opts.append(f\"--max_iters={self.num_iterations}\")\n # Constructs the actual command vectors (plus an index for logging\n # purposes).\n random.seed(config.SEED)\n starts = [\n (\n RandomStart(\n idx,\n seed,\n self.input_far_path,\n self.output_far_path,\n self.cg_path,\n self.working_directory,\n train_opts,\n )\n )\n for (idx, seed) in enumerate(\n random.sample(range(1, RAND_MAX), self.random_starts), 1\n )\n ]\n stopped = threading.Event()\n num_commands = len(starts)\n job_queue = Queue()\n fst_likelihoods = {}\n # Actually runs starts.\n logger.info(\"Calculating alignments...\")\n begin = time.time()\n with tqdm(total=num_commands * self.num_iterations, disable=config.QUIET) as pbar:\n for start in starts:\n job_queue.put(start)\n error_dict = {}\n return_queue = Queue()\n procs = []\n for i in range(config.NUM_JOBS):\n log_path = self.working_log_directory.joinpath(f\"baumwelch.{i}.log\")\n p = RandomStartWorker(\n i,\n job_queue,\n return_queue,\n log_path,\n stopped,\n )\n procs.append(p)\n p.start()\n\n while True:\n try:\n result = return_queue.get(timeout=1)\n if isinstance(result, Exception):\n\n error_dict[getattr(result, \"job_name\", 0)] = result\n continue\n if stopped.is_set():\n continue\n except queue.Empty:\n for proc in procs:\n if not proc.finished.is_set():\n break\n else:\n break\n continue\n if isinstance(result, int):\n pbar.update(result)\n else:\n fst_likelihoods[result[0]] = result[1]\n for p in procs:\n p.join()\n if error_dict:\n raise PyniniAlignmentError(error_dict)\n (best_fst, best_likelihood) = min(fst_likelihoods.items(), key=operator.itemgetter(1))\n logger.info(f\"Best likelihood: {best_likelihood}\")\n logger.debug(\n f\"Ran {self.random_starts} random starts in {time.time() - begin:.3f} seconds\"\n )\n # Moves best likelihood solution to the requested location.\n shutil.move(best_fst, self.align_path)\n cmd = [thirdparty_binary(\"baumwelchdecode\")]\n if self.fst_default_cache_gc:\n cmd.append(f\"--fst_default_cache_gc={self.fst_default_cache_gc}\")\n if self.fst_default_cache_gc_limit:\n cmd.append(f\"--fst_default_cache_gc_limit={self.fst_default_cache_gc_limit}\")\n cmd.append(self.input_far_path)\n cmd.append(self.output_far_path)\n cmd.append(self.align_path)\n cmd.append(self.afst_path)\n cmd = [str(x) for x in cmd]\n logger.debug(f\"Subprocess call: {cmd}\")\n subprocess.check_call(cmd, env=os.environ)\n logger.info(\"Completed computing alignments!\")",
"def apply_attention(input, attention):\n n, c = input.size()[:2]\n glimpses = attention.size(1) # glimpses is equivalent to multiple heads in attention\n\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\n input = input.view(n, 1, c, -1) # [n, 1, c, s] [batch, 1, channels, height*width] [48, 1, 2048, 7*7]\n attention = attention.view(n, glimpses, -1) # [48, 2, 7*7]\n attention = torch.nn.functional.softmax(attention, dim=-1).unsqueeze(2) # [n, g, 1, s] [batch, multi_head, 1, height*width] [48, 2, 1, 7*7]\n weighted = attention * input # [n, g, c, s] [48, 2, 2048, 7*7]\n weighted_mean = weighted.sum(dim=-1) # [n, g, c] [48, 2, 2048]\n return weighted_mean.view(n, -1) # [48, 4096]",
"def apply_attention(input, attention):\n # import pdb\n # pdb.set_trace()\n n, c = input.size()[:2]\n glimpses = attention.size(1)\n\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\n input = input.view(n, c, -1)\n attention = attention.view(n, glimpses, -1)\n s = input.size(2)\n\n # apply a softmax to each attention map separately\n # since softmax only takes 2d inputs, we have to collapse the first two dimensions together\n # so that each glimpse is normalized separately\n attention = attention.view(n * glimpses, -1)\n attention = F.softmax(attention)\n\n # apply the weighting by creating a new dim to tile both tensors over\n target_size = [n, glimpses, c, s]\n input = input.view(n, 1, c, s).expand(*target_size)\n attention = attention.view(n, glimpses, 1, s).expand(*target_size)\n weighted = input * attention\n # sum over only the spatial dimension\n weighted_mean = weighted.sum(dim=3)\n # the shape at this point is (n, glimpses, c, 1)\n return weighted_mean.view(n, -1)",
"def __align_mtms_to_psms(self):\n T_psm1 = self.__psm1_kin__.forward(self.__psm1_last_jnt__)\n T_psm2 = self.__psm2_kin__.forward(self.__psm2_last_jnt__)\n \n T_mtml = self.__mtml_kin__.forward(self.__last_mtml_jnt__)\n T_mtmr = self.__mtmr_kin__.forward(self.__last_mtmr_jnt__)\n T_mtml_000 = self.__mtml_kin__.forward([0,0,0,0,0,0,0])\n \n T_mtml[0:3,0:3] = (((self.__T_ecm__ * (T_mtml_000 ** -1)) ** -1) * T_psm2)[0:3,0:3]\n T_mtmr[0:3, 0:3] = T_psm1[0:3, 0:3]\n \n jnt_mtml = self.__mtml_kin__.inverse(T_mtml)\n jnt_mtmr = self.__mtmr_kin__.inverse(T_mtmr)\n print('aligning')\n if self.__mode__ == self.MODE.hardware:\n self.__hw_mtml__.move_joint_list( jnt_mtml.tolist(), range(0, len(jnt_mtml)), interpolate=True)\n self.__hw_mtmr__.move_joint_list( jnt_mtmr.tolist(), range(0, len(jnt_mtmr)), interpolate=True)",
"def alignments(self):\n return list(self.kernels_by_alignment.keys())",
"def get_memes(activations, sequences, y, output_file_path):\n #find the threshold value for activation\n activation_threshold = 0.5*np.amax(activations, axis=(0,2))\n\n # Get the number of filters\n N_FILTERS = activations.shape[1]\n\n #pad sequences:\n #npad = ((0, 0), (0, 0), (9, 9))\n #sequences = np.pad(sequences, pad_width=npad, mode='constant', constant_values=0)\n\n pwm = np.zeros((N_FILTERS, 4, 19))\n pfm = np.zeros((N_FILTERS, 4, 19))\n nsamples = activations.shape[0]\n\n OCR_matrix = np.zeros((N_FILTERS, y.shape[0]))\n activation_indices = []\n activated_OCRs = np.zeros((N_FILTERS, y.shape[1]))\n n_activated_OCRs = np.zeros(N_FILTERS)\n total_seq = np.zeros(N_FILTERS)\n\n for i in tqdm(range(N_FILTERS)):\n #create list to store 19 bp sequences that activated filter\n act_seqs_list = []\n act_OCRs_tmp = []\n for j in range(nsamples):\n # find all indices where filter is activated\n indices = np.where(activations[j,i,:] > activation_threshold[i])\n\n #save ground truth peak heights of OCRs activated by each filter\n if indices[0].shape[0]>0:\n act_OCRs_tmp.append(y[j, :])\n OCR_matrix[i, j] = 1\n\n for start in indices[0]:\n activation_indices.append(start)\n end = start+19\n act_seqs_list.append(sequences[j,:,start:end])\n\n #convert act_seqs from list to array\n if act_seqs_list:\n act_seqs = np.stack(act_seqs_list)\n pwm_tmp = np.sum(act_seqs, axis=0)\n pfm_tmp=pwm_tmp\n total = np.sum(pwm_tmp, axis=0)\n pwm_tmp = np.nan_to_num(pwm_tmp/total)\n\n pwm[i] = pwm_tmp\n pfm[i] = pfm_tmp\n\n #store total number of sequences that activated that filter\n total_seq[i] = len(act_seqs_list)\n\n #save mean OCR activation\n act_OCRs_tmp = np.stack(act_OCRs_tmp)\n activated_OCRs[i, :] = np.mean(act_OCRs_tmp, axis=0)\n\n #save the number of activated OCRs\n n_activated_OCRs[i] = act_OCRs_tmp.shape[0]\n\n\n activated_OCRs = np.stack(activated_OCRs)\n\n #write motifs to meme format\n #PWM file:\n meme_file = open(output_file_path, 'w')\n meme_file.write(\"MEME version 4 \\n\")\n\n print('Saved PWM File as : {}'.format(output_file_path))\n\n for i in range(0, N_FILTERS):\n if np.sum(pwm[i,:,:]) >0:\n meme_file.write(\"\\n\")\n meme_file.write(\"MOTIF filter%s \\n\" % i)\n meme_file.write(\"letter-probability matrix: alength= 4 w= %d \\n\" % np.count_nonzero(np.sum(pwm[i,:,:], axis=0)))\n\n\n for j in range(0, 19):\n if np.sum(pwm[i,:,j]) > 0:\n meme_file.write(str(pwm[i,0,j]) + \"\\t\" + str(pwm[i,1,j]) + \"\\t\" + str(pwm[i,2,j]) + \"\\t\" + str(pwm[i,3,j]) + \"\\n\")\n\n meme_file.close()",
"def emotion_attention(self, last_encode_hiddens, con_decode_hiddens):\r\n batch_emotion_words = self.get_batch_emotion_words()\r\n batch_emotion_embeddings = tf.nn.embedding_lookup(self.embeddings, batch_emotion_words) # [batch, emotion, embedding]\r\n emotion_word_w = tf.stack([self.emotion_word_trans_w for _ in range(self.batch_size)], axis=0) # [batch, embedding, 1]\r\n emotion_word_sim = tf.matmul(batch_emotion_embeddings, emotion_word_w) # [batch, emotion, 1]\r\n\r\n encode_hidden_single = tf.matmul(last_encode_hiddens, self.emotion_post_trans_w) # [batch, 1]\r\n encode_hidden_sim = tf.stack([encode_hidden_single for _ in range(self.emotion_vocab_size)], axis=1) # [batch, emotion, 1]\r\n\r\n decode_hidden_single = tf.matmul(con_decode_hiddens, self.emotion_response_trans_w) # [batch, 1]\r\n decode_hidden_sim = tf.stack([decode_hidden_single for _ in range(self.emotion_vocab_size)], axis=1) # [batch, emotion, 1]\r\n\r\n emotion_attention_scores = self.activate(emotion_word_sim + encode_hidden_sim + decode_hidden_sim) # [batch, emotion, 1]\r\n sfx_emotion_scores = tf.nn.softmax(emotion_attention_scores, dim=1)\r\n emotion_att_vectors = tf.reduce_sum(sfx_emotion_scores * batch_emotion_embeddings, axis=1) # [batch, embedding]\r\n return emotion_att_vectors",
"def get_alignments():\n clean_expired_sessions()\n\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n logging.info(\"get_alignments start session=\" + str(session) + \" process=\" + str(process))\n\n dictio = {}\n\n if Configuration.overall_enable_alignments:\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n Commons.semaphore_matplot.acquire()\n try:\n petri_string = request.json[\"model\"]\n svg_on_petri, svg_table = lh.get_handler_for_process_and_session(process, session).get_alignments(\n petri_string,\n parameters={})\n dictio = {\"petri\": svg_on_petri.decode('utf-8'), \"table\": svg_table.decode('utf-8')}\n except:\n logging.error(traceback.format_exc())\n pass\n\n logging.info(\n \"get_alignments complete session=\" + str(session) + \" process=\" + str(process) + \" user=\" + str(\n user))\n\n Commons.semaphore_matplot.release()\n\n ret = jsonify(dictio)\n\n return ret",
"def align(self):\n\n # load the alignment parameters into the align_params object\n self.align_params.load_params_from_file(self.input_file)\n self.init_matrix()\n # populate the score matrices based on the input parameters\n self.populate_score_matrices()\n #self.traceback()\n #self.write_output()\n # perform a traceback and write the output to an output file\n ### FILL IN ###",
"def __call__(self, inputs_to_attend, attn_gen):\n _, height, width, _ = inputs_to_attend.get_shape().as_list()\n attention_size = height * width\n # Use a MLP here\n attention = layers.fully_connected(attn_gen, 10, scope='attn_spatial1')\n attention = layers.fully_connected(\n attention,\n attention_size,\n activation_fn=None,\n scope='attn_spatial2')\n attention = tf.nn.softmax(attention)\n\n # [batch_size, kernel_size, kernel_size, n_channels]\n inputs_shape = inputs_to_attend.get_shape().as_list()\n # reshape to [batch_size, kernel_size, kernel_size]\n attention_shaped = tf.reshape(attention, inputs_shape[:3])\n attention_shaped = tf.expand_dims(attention_shaped, axis=-1)\n inputs_to_attend *= attention_shaped\n\n return inputs_to_attend, attention",
"def attention_intervention_experiment(self, intervention, effect):\n # E.g. The doctor asked the nurse a question. He\n x = intervention.base_strings_tok[0]\n # E.g. The doctor asked the nurse a question. She\n x_alt = intervention.base_strings_tok[1]\n\n if effect == 'indirect':\n input = x_alt # Get attention for x_alt\n elif effect == 'direct':\n input = x # Get attention for x\n else:\n raise ValueError(f\"Invalid effect: {effect}\")\n batch = torch.tensor(input).unsqueeze(0).to(self.device)\n attention_override = self.model(batch)[-1]\n\n batch_size = 1\n seq_len = len(x)\n seq_len_alt = len(x_alt)\n assert seq_len == seq_len_alt\n assert len(attention_override) == self.num_layers\n assert attention_override[0].shape == (batch_size, self.num_heads, seq_len, seq_len)\n\n with torch.no_grad():\n\n candidate1_probs_head = torch.zeros((self.num_layers, self.num_heads))\n candidate2_probs_head = torch.zeros((self.num_layers, self.num_heads))\n candidate1_probs_layer = torch.zeros(self.num_layers)\n candidate2_probs_layer = torch.zeros(self.num_layers)\n\n if effect == 'indirect':\n context = x\n else:\n context = x_alt\n\n # Intervene at every layer and head by overlaying attention induced by x_alt\n model_attn_override_data = [] # Save layer interventions for model-level intervention later\n for layer in range(self.num_layers):\n layer_attention_override = attention_override[layer]\n attention_override_mask = torch.ones_like(layer_attention_override, dtype=torch.uint8)\n layer_attn_override_data = [{\n 'layer': layer,\n 'attention_override': layer_attention_override,\n 'attention_override_mask': attention_override_mask\n }]\n candidate1_probs_layer[layer], candidate2_probs_layer[layer] = self.attention_intervention(\n context=context,\n outputs=intervention.candidates_tok,\n attn_override_data = layer_attn_override_data)\n model_attn_override_data.extend(layer_attn_override_data)\n for head in range(self.num_heads):\n attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)\n attention_override_mask[0][head] = 1 # Set mask to 1 for single head only\n head_attn_override_data = [{\n 'layer': layer,\n 'attention_override': layer_attention_override,\n 'attention_override_mask': attention_override_mask\n }]\n candidate1_probs_head[layer][head], candidate2_probs_head[layer][head] = self.attention_intervention(\n context=context,\n outputs=intervention.candidates_tok,\n attn_override_data=head_attn_override_data)\n\n # Intervene on entire model by overlaying attention induced by x_alt\n candidate1_probs_model, candidate2_probs_model = self.attention_intervention(\n context=context,\n outputs=intervention.candidates_tok,\n attn_override_data=model_attn_override_data)\n\n return candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\\\n candidate1_probs_model, candidate2_probs_model",
"def _compute_rewards(self,\n frequent_sequences: torch.Tensor, # [flock_size, n_frequent_seqs, seq_len]\n frequent_seq_occurrences: torch.Tensor,\n context_data: torch.Tensor, # [flock_size, n_providers, 3, context_size]\n frequent_context_likelihoods: torch.Tensor, # [flock_size, n_frequent_seqs, seq_length, n_providers, context_size)\n seq_probs_priors_clusters_context: torch.Tensor,\n frequent_rewards_punishments: torch.Tensor, # [flock_size, n_frequent_seqs, seq_length, 2]\n influence_model: torch.Tensor, # [flock_size, n_frequent_seqs, seq_lookahead, n_cluster_centers]\n seq_rewards_goal_directed: torch.Tensor): # [flock_size, n_frequent_seqs, 2]\n influence_model[torch.isnan(influence_model)] = 0\n context_data[torch.isnan(context_data)] = 0\n\n # Unsqueeze and expand both the context data and the context on probs so that they can be multiplied\n context_data_rewards_punishments = multi_unsqueeze(context_data[:, :, 1:],\n [1, 2]).expand(self._flock_size, self.n_frequent_seqs,\n self.seq_length, self.n_providers, 2,\n self.context_size)\n\n context_on_probs = frequent_context_likelihoods.unsqueeze(dim=4).expand(self._flock_size, self.n_frequent_seqs,\n self.seq_length, self.n_providers, 2,\n self.context_size)\n\n # Calculate the rewards/punishments for each cluster in each freq_seq lookahead assuming every\n # transition is perfect. AKA the undiscounted rewards from the parents.\n current_rewards = (context_data_rewards_punishments * context_on_probs).sum(dim=5)\n\n # Get the average freq_reward that this expert has seen by dividing the seen rewards by the occurrences\n frequent_rewards_punishments_scaled =\\\n frequent_rewards_punishments / multi_unsqueeze(frequent_seq_occurrences.float() + SMALL_CONSTANT, [2, 3]).expand(frequent_rewards_punishments.size())\n\n # Expand the scaled rewards for addition to the current rewards\n # NOTE: This can be done in extract frequent sequence in the learning process instead of here\n frequent_rewards_punishments_scaled = frequent_rewards_punishments_scaled.unsqueeze(dim=3).expand(self._flock_size, self.n_frequent_seqs, self.seq_lookahead,\n self.n_providers, 2) * self.own_rewards_weight\n\n # Add in the scaled rewards to the lookahead part of the current rewards.\n current_rewards[:, :, self.seq_lookbehind:, :, :] += frequent_rewards_punishments_scaled\n\n # We iterate backwards through the possibilities of sequences to transform the undiscounted cluster rewards into\n # expected_values of following transitions\n\n # Allocate some temp storage for the kernel - This is the expected rewards/punishments for each destination\n # cluster when following the sequence from the current cluster, for all parents and all sequences\n cluster_rewards = torch.zeros((self._flock_size, self.n_frequent_seqs, self.n_providers, 2,\n self.n_cluster_centers), dtype=self._float_dtype, device=self.device)\n\n n_transitions = self.seq_length - 1\n # If this is the bottom level, we process all lookahead transitions, otherwise only down to\n # sequence_lookahead - 1. We do this so that higher levels of abstraction don't 'double discount' the expected\n # rewards of the lowest level for the next transition.\n if self.produce_actions:\n last_processed_transition = n_transitions - self.seq_lookahead\n else:\n last_processed_transition = n_transitions - (self.seq_lookahead - 1)\n\n # Now we iterate over the transitions of the sequence, starting from the last one (at n_transitions),\n # and moving towards the current point in the sequence (marked by last_processed_transition). 
The indices\n # for processing this range are a bit funky, as the index for the final transition is n_transitions-1, and the\n # true last_processed_transition index is last_processed_transition - 1.\n for transition in range(n_transitions - 1, last_processed_transition - 1, -1):\n cluster_rewards.fill_(0)\n\n tp_process_kernels.discount_rewards_iterative(frequent_sequences,\n seq_probs_priors_clusters_context,\n current_rewards,\n influence_model,\n cluster_rewards,\n self._flock_size,\n self.n_frequent_seqs,\n self.n_cluster_centers,\n transition,\n self.n_providers)\n # Get the EV of trying to get to the next cluster of this sequence.\n transition_rewards = cluster_rewards.sum(dim=4) * REWARD_DISCOUNT_FACTOR\n\n # Update current rewards with the new EV of this transition (using the EV of rewards gained\n # when attempting this transition)\n current_rewards[:, :, transition + 1] = transition_rewards\n\n # Take max of transition EV vs origin cluster EV (for each provider) and update current rewards with it\n # Creates lower bound for EV\n max_ev = torch.max(current_rewards[:, :, transition], current_rewards[:, :, transition + 1])\n current_rewards[:, :, transition] = max_ev\n\n # Scale all rewards by their prior sequence probabilities\n discounted_rewards = current_rewards * multi_unsqueeze(seq_probs_priors_clusters_context,\n [2, 3, 4]).expand(self._flock_size, self.n_frequent_seqs,\n self.seq_length, self.n_providers, 2)\n\n # we care about the rewards and punishments for this transition, so get them\n next_potentials = discounted_rewards[:, :, self.seq_lookbehind]\n\n # Find max over all providers in each sequence in the influence_model\n next_potentials, _ = torch.max(next_potentials, dim=2)\n\n # Copy this into the return\n seq_rewards_goal_directed.copy_(next_potentials)",
"def attention_layer(query_layer_l,\n key_layer_l,\n value_layer_l,\n from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(a=output_tensor, perm=[0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3],name='')\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3],name='')\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n # query_layer = tf.compat.v1.layers.dense(\n # from_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=query_act,\n # name=\"query\",\n # kernel_initializer=create_initializer(initializer_range))\n\n # query_layer_l=Dense(name='query',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=query_act)\n\n query_layer=query_layer_l(from_tensor_2d)\n\n # `key_layer` = [B*T, N*H]\n # key_layer = tf.compat.v1.layers.dense(\n # to_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=key_act,\n # name=\"key\",\n # kernel_initializer=create_initializer(initializer_range))\n # key_layer_l = Dense(name='key',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=key_act)\n\n key_layer = key_layer_l(to_tensor_2d)\n\n\n # `value_layer` = [B*T, N*H]\n # value_layer = tf.compat.v1.layers.dense(\n # to_tensor_2d,\n # num_attention_heads * size_per_head,\n # activation=value_act,\n # name=\"value\",\n # kernel_initializer=create_initializer(initializer_range))\n # value_layer_l = Dense(name='value',\n # kernel_initializer=create_initializer(initializer_range),\n # units=num_attention_heads * size_per_head,\n # activation=value_act)\n\n value_layer = value_layer_l(to_tensor_2d)\n\n\n # `query_layer` = [B, N, F, H]\n query_layer = transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n 
attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(a=value_layer, perm=[0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(a=context_layer, perm=[0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct the `JointAttentionWrapper`. NOTE If you are using the `BeamSearchDecoder` with a cell wrapped in a `JointAttentionWrapper`, then you must ensure that the encoder output has been tiled to `beam_width` via `tf.contrib.seq2seq.tile_batch` (not `tf.tile`) and that the `batch_size` passed to `zero_state` equals `true_batch_size * beam_width`.
|
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
super(JointAttentionWrapper, self).__init__(name=name)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError(
"cell must be an RNNCell, saw type: %s" % type(cell).__name__)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, encoder_attention, decoder_attention: array_ops.concat(
[inputs, encoder_attention, decoder_attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing JointAttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
|
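The constructor above is mostly argument normalization: a single attention mechanism is wrapped into a one-element tuple, and `attention_layer_size`, when given, must supply one integer per mechanism. Below is a minimal, framework-free sketch of that normalization; the `AttentionMechanism` stand-in and the function name are illustrative assumptions, not the real TensorFlow API.

class AttentionMechanism:
    """Illustrative stand-in for the real AttentionMechanism base class."""


def normalize_attention_args(attention_mechanism, attention_layer_size=None):
    # Accept a single mechanism or a list/tuple of them, as the wrapper does.
    if isinstance(attention_mechanism, (list, tuple)):
        is_multi = True
        mechanisms = tuple(attention_mechanism)
    else:
        is_multi = False
        mechanisms = (attention_mechanism,)
    for m in mechanisms:
        if not isinstance(m, AttentionMechanism):
            raise TypeError("attention_mechanism must contain only AttentionMechanism "
                            "instances, saw type: %s" % type(m).__name__)
    if attention_layer_size is not None:
        sizes = (tuple(attention_layer_size)
                 if isinstance(attention_layer_size, (list, tuple))
                 else (attention_layer_size,))
        if len(sizes) != len(mechanisms):
            raise ValueError("attention_layer_size must contain exactly one integer per "
                             "attention_mechanism, saw: %d vs %d"
                             % (len(sizes), len(mechanisms)))
    return is_multi, mechanisms


# A single mechanism is normalized to a one-element tuple with is_multi == False.
is_multi, mechanisms = normalize_attention_args(AttentionMechanism())
assert is_multi is False and len(mechanisms) == 1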
[
"def create_joint_at(obj = None):\n\n return create_at(obj, create = 'joint')",
"def create_weld_joint(self, *args, **kwargs):\r\n joint = joints.WeldJoint(*args, **kwargs)\r\n self.add_joint(joint)\r\n return joint",
"def new_joint(name, **kwargs):\n return new_element(tag=\"joint\", name=name, **kwargs)",
"def __init__(self, D, S, lmbda=None, mu=0.0, opt=None):\n\n if opt is None:\n opt = BPDN.Options()\n super(BPDNJoint, self).__init__(D, S, lmbda, opt)\n self.mu = self.dtype.type(mu)",
"def insertJoint(object):\n pass",
"def joint_init(self):\n if self.joint_id in [5, 8]:\n self.joint = basic_classes.Joint(self.joint_id, 200)\n elif self.joint_id in [6, 7]:\n self.joint = basic_classes.Joint(self.joint_id, 800)",
"def __init__(self,\n model,\n batch_ndims=None,\n use_vectorized_map=False,\n validate_args=False,\n experimental_use_kahan_sum=False,\n name=None):\n super(_JointDistributionNamed, self).__init__(\n model,\n batch_ndims=batch_ndims,\n use_vectorized_map=use_vectorized_map,\n validate_args=validate_args,\n experimental_use_kahan_sum=experimental_use_kahan_sum,\n name=name or 'JointDistributionNamed')",
"def formMarker(s, joint):\n win = cmds.playblast(activeEditor=True) # Viewport\n cam = cmds.modelEditor(win, q=True, camera=True) # Camera\n p1 = om.MVector(cmds.xform(cam, q=True, ws=True, t=True)) # Cam pos\n p2 = om.MVector(0,0,0) # Center of world\n scale = ((p2 - p1).length()) * 0.15\n wrapper = cmds.group(em=True) # Hold position\n marker = cmds.group(em=True)\n cmds.scale(scale, scale, scale, marker)\n cmds.parent(marker, wrapper)\n for ax in AXIS: # Build Axis\n pos = AXIS[ax]\n c = cmds.curve(p=((0,0,0), pos), d=1)\n shape = cmds.listRelatives(c, s=True)[0]\n SetColour(shape, AXISCOLOUR[ax])\n cmds.parent(shape, marker, s=True, r=True)\n cmds.delete(c)\n cmds.parentConstraint(joint, wrapper)\n ro = cmds.xform(joint, q=True, ws=True, ro=True)\n roo = cmds.xform(joint, q=True, roo=True)\n cmds.xform(marker, roo=roo)\n cmds.xform(marker, ro=ro, ws=True)\n children = cmds.listRelatives(joint, type=\"joint\") or [] # if not a leaf joint, aim at child\n if len(children) == 1: # If we are already pointing at child, retain aim\n matrix = zip(*[iter(cmds.xform(joint, ws=True, q=True, m=True))]*4)\n child_pos = om.MVector(cmds.xform(children[0], q=True, ws=True, t=True))\n parent_pos = om.MVector(matrix[3][:3])\n aim_vec = child_pos - parent_pos\n for i, axis in enumerate(matrix[:3]):\n vec = om.MVector(axis[:3])\n if vec.isParallel(aim_vec): # We found our pointing axis\n ax = [[1 if a == b else 0 for a in range(3)] for b in range(3)]\n aim = ax[i]\n up = ax[1 if i != 1 else 2] # Up Y or Z\n cmds.aimConstraint(\n children[0],\n marker,\n aim=aim,\n u=up,\n skip=\"xyz\"[i],\n wut=\"vector\",\n wu=up\n )\n cmds.select(marker, r=True)\n return marker, wrapper",
"def create_pulley_joint(self, *args, **kwargs):\r\n joint = joints.PulleyJoint(*args, **kwargs)\r\n self.add_joint(joint)\r\n return joint",
"def annotate(component, annotation, indicator):\n import abjad\n assert isinstance(annotation, str), repr(annotation)\n wrapper = abjad.Wrapper(\n annotation=annotation,\n component=component,\n indicator=indicator,\n )\n wrapper._bind_to_component(component)",
"def read_joints(rect):\n # Mapping from MPII joints to LSP joints (0:13). In this roder:\n _COMMON_JOINT_IDS = [\n 0, # R ankle\n 1, # R knee\n 2, # R hip\n 3, # L hip\n 4, # L knee\n 5, # L ankle\n 10, # R Wrist\n 11, # R Elbow\n 12, # R shoulder\n 13, # L shoulder\n 14, # L Elbow\n 15, # L Wrist\n 8, # Neck top\n 9, # Head top\n ]\n assert ('annopoints' in rect._fieldnames)\n points = rect.annopoints.point\n if not isinstance(points, np.ndarray):\n # There is only one! so ignore this image\n return None\n # Not all joints are there.. read points in a dict.\n read_points = {}\n\n for point in points:\n vis = convert_is_visible(point.is_visible)\n read_points[point.id] = np.array([point.x, point.y, vis])\n\n # Go over each common joint ids\n joints = np.zeros((3, len(_COMMON_JOINT_IDS)))\n for i, jid in enumerate(_COMMON_JOINT_IDS):\n if jid in read_points.keys():\n joints[:, i] = read_points[jid]\n # If it's annotated, then use it as visible\n # (in this visible = 0 iff no gt label)\n joints[2, i] = 1.\n\n return joints",
"def get_joint_info(self, root, tip):\n chain = self.robot_desc.get_chain(root, tip)\n if self.robot_desc is None:\n raise ValueError('Robot description not loaded from urdf')\n\n joint_list = []\n upper = []\n lower = []\n actuated_names = []\n\n for item in chain:\n if item in self.robot_desc.joint_map:\n joint = self.robot_desc.joint_map[item]\n joint_list += [joint]\n if joint.type in self.actuated_types:\n actuated_names += [joint.name]\n if joint.type == \"continuous\":\n upper += [cs.inf]\n lower += [-cs.inf]\n else:\n upper += [joint.limit.upper]\n lower += [joint.limit.lower]\n if joint.axis is None:\n joint.axis = [1., 0., 0.]\n if joint.origin is None:\n joint.origin = Pose(xyz=[0., 0., 0.],\n rpy=[0., 0., 0.])\n elif joint.origin.xyz is None:\n joint.origin.xyz = [0., 0., 0.]\n elif joint.origin.rpy is None:\n joint.origin.rpy = [0., 0., 0.]\n\n return joint_list, actuated_names, upper, lower",
"def create_distance_joint(self, *args, **kwargs):\r\n joint = joints.DistanceJoint(*args, **kwargs)\r\n self.add_joint(joint)\r\n return joint",
"def _build_attention(self, rank: int):\n super(QcQuantizableMultiHeadAttention, self)._build_attention(rank)\n\n def scale_and_multiply(inputs):\n return special_math_ops.einsum(self._dot_product_equation,\n inputs[0],\n math_ops.multiply(inputs[1], 1.0 / math.sqrt(float(self._key_dim))))\n\n self._attention_score_layer = tf.keras.layers.Lambda(scale_and_multiply, name=\"scale_and_multiply\")\n self._wrapped_attention_score_layer = self._wrap_layer(self._attention_score_layer, 2)\n\n self._identity_layer = tf.keras.layers.Lambda(lambda x: x, name=\"identity\")\n self._wrapped_identity_layer = self._wrap_layer(self._identity_layer, 1)\n\n def masked_add(inputs):\n adder = (1.0 - math_ops.cast(inputs[1], inputs[0].dtype)) * (_large_compatible_negative(inputs[0].dtype))\n return inputs[0] + adder\n\n self._add_layer = tf.keras.layers.Lambda(masked_add, name=\"masked_add\")\n self._wrapped_addition = self._wrap_layer(self._add_layer, 2)\n\n def softmax_func(inputs):\n return self._masked_softmax(inputs)\n\n self._softmax_layer = tf.keras.layers.Lambda(softmax_func, name=\"softmax\")\n self._wrapped_masked_softmax = self._wrap_layer(self._softmax_layer, 1)\n\n def combine_qkv(inputs):\n return special_math_ops.einsum(self._combine_equation, inputs[0], inputs[1])\n\n self._combine_qkv_layer = tf.keras.layers.Lambda(combine_qkv, name=\"combine_qkv\")\n self._wrapped_combine_qkv_layer = self._wrap_layer(self._combine_qkv_layer, 2)",
"def create_rope_joint(self, *args, **kwargs):\r\n joint = joints.RopeJoint(*args, **kwargs)\r\n self.add_joint(joint)\r\n return joint",
"def init_trajectory(self):\n state = self.jointStatePublisher.last_joint_states\n self.trajectory_t0 = self.robot.getTime()\n self.trajectory = JointTrajectory()\n self.trajectory.joint_names = self.prefixedJointNames\n self.trajectory.points = [JointTrajectoryPoint(\n positions=state.position if state else [0] * 7,\n velocities=[0] * 7,\n accelerations=[0] * 7,\n time_from_start=rospy.Duration(0.0))]",
"def insertJointCtx(*args, **kwargs):\n\n pass",
"def init_attn(self):\n self.ph_attn = attention.GlobalAttention(query_dim=self.hidden_size,\n key_dim=self.word_emb_size)\n return",
"def to_etree(self):\n elements_to_rtn = [] # A list of elements that will be returned\n # and then appended to the body\n annotation_body = Element('annotation')\n # TO RETURN\n elements_to_rtn.append(annotation_body)\n\n mention_id = SubElement(annotation_body, 'mention')\n mention_id.set('id', self.id)\n\n annotator_id = SubElement(annotation_body, 'annotator')\n annotator_id.set('id', 'eHOST_2010')\n annotator_id.text = self.annotator\n\n span = SubElement(annotation_body, 'span', {'start': str(self.span_in_document[0]),\n 'end': str(self.span_in_document[1])})\n spanned_text = SubElement(annotation_body, 'spannedText')\n spanned_text.text = self.text\n creation_date = SubElement(annotation_body, 'creationDate')\n creation_date.text = self.datetime\n\n\n # Now create class_mention\n class_mention = Element(\"classMention\")\n class_mention.set(\"id\", self.id)\n # TO RETURN\n elements_to_rtn.append(class_mention)\n #mention_class.set('id', self.classification)\n mention_class = SubElement(class_mention, 'mentionClass')\n mention_class.set('id', self.annotation_type)\n mention_class.text = self.text\n\n # Add attributes\n # ASSERTION\n # These fields point to stringSlotMention fields that contain the attributes\n slot_mention_assertion_id = self.id + '1'\n\n has_slot_mention_assertion = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_assertion.set('id', slot_mention_assertion_id)\n\n string_slot_mention_assertion = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_assertion)\n string_slot_mention_assertion.set('id', slot_mention_assertion_id)\n mention_slot_assertion = SubElement(string_slot_mention_assertion, 'mentionSlot')\n mention_slot_assertion.set('id', 'assertion')\n string_slot_mention_value_assertion = SubElement(string_slot_mention_assertion, 'stringSlotMentionValue')\n string_slot_mention_value_assertion.set('value', self.attributes['assertion'])\n\n # TEMPORALITY\n slot_mention_temporality_id = self.id + '2'\n has_slot_mention_temporality = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_temporality.set('id', slot_mention_temporality_id)\n\n string_slot_mention_temporality = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_temporality)\n string_slot_mention_temporality.set('id', slot_mention_temporality_id)\n mention_slot_temporality = SubElement(string_slot_mention_temporality, 'mentionSlot')\n mention_slot_temporality.set('id', 'temporality')\n string_slot_mention_value_temporality = SubElement(string_slot_mention_temporality, 'stringSlotMentionValue')\n string_slot_mention_value_temporality.set('value', self.attributes['temporality'])\n\n if self.annotation_type != 'Evidence of SSI':\n return elements_to_rtn\n\n\n # CLASSIFICATION\n # Add 'classification' field for 'infection_type'\n slot_mention_classification_id = self.id + '3'\n has_slot_mention_classification = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_classification.set('id', slot_mention_classification_id)\n\n string_slot_mention_classification = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_classification)\n string_slot_mention_classification.set('id', slot_mention_classification_id)\n mention_slot_classification = SubElement(string_slot_mention_classification, 'mentionSlot')\n mention_slot_classification.set('id', 'classification')\n string_slot_mention_value_classification = SubElement(string_slot_mention_classification, 
'stringSlotMentionValue')\n string_slot_mention_value_classification.set('value', self.attributes['ssi_class'])\n\n\n\n\n return elements_to_rtn\n #return annotation_body, class_mention"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns `seq` as a tuple or as its single element; which is returned is determined by how the AttentionMechanism(s) were passed to the constructor.
|
def _item_or_tuple(self, seq):
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
|
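Outside of TensorFlow this is simply the item-or-tuple pattern; a self-contained sketch follows, with `is_multi` passed explicitly instead of read from `self._is_multi`.

def item_or_tuple(seq, is_multi):
    # Materialize the (possibly lazy) sequence, then unwrap it if only one
    # attention mechanism was configured.
    t = tuple(seq)
    return t if is_multi else t[0]


assert item_or_tuple((x for x in (1, 2)), is_multi=True) == (1, 2)
assert item_or_tuple([7], is_multi=False) == 7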
[
"def MakeSeq( val ):\n return val if IsSeq( val ) else ( val, )",
"def _get_simple_sequence_2tuple(self):\n simple_sequence = [\n 1234, 1.234, '1234', datetime.date.today(), datetime.datetime.today(), self._get_test_safe_3tuple[1]\n ]\n result_simple_sequence = [str(v) if not hasattr(v, '__html__') else v.__html__() for v in simple_sequence]\n return simple_sequence, result_simple_sequence",
"def maybe_single(sequence):\n try:\n (single,) = sequence\n except ValueError:\n return sequence\n return single",
"def as_seq(x, seq_type=None):\n if x is None:\n # None represents an empty sequence\n x = []\n elif not isinstance(x, (list, tuple, set, frozenset, dict)):\n # if x is not already a sequence (including dict), then make it one\n x = [x]\n\n if seq_type is not None and not isinstance(x, seq_type):\n # if necessary, convert x to the sequence type\n x = seq_type(x)\n\n return x",
"def seq_list(self):\n if not self.__seq_list:\n return None\n\n if self.combine_seq:\n # wrap in list, since accessor wants a list\n return [self.__seq_list[-1]]\n return self.__seq_list",
"def getSequence(self, loc=None, **kargs):\n raise NotImplementedError",
"def force_tuple(value: Union[_T, Sequence[_T]]) -> Sequence[_T]:\n if value is None:\n return ()\n if isinstance(value, tuple):\n return value\n if isinstance(value, list):\n return tuple(value)\n return (value,)",
"def extract_annotation(self, sequence):\n annotation = self._extractor.predict(sequence)\n return annotation.flatten()",
"def first(seq): # real signature unknown; restored from __doc__\n pass",
"def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq",
"def single(seq):\n #Grab the first, if it is empty the iterator will raise.\n #If the iterator doesn't raise after grabbing the second,\n # raise ourselves since seq should only have one item.\n iterator = iter(seq)\n try:\n result = next(iterator)\n except StopIteration:\n # In MayaGUI, it has SUPER strange behavior\n # (not reproducible in mayabatch) where the StopIteration\n # happens but it doesn't actually raise-\n # the execution of the script or command stops but no error\n # is apparent (for example, 'f = single([])' in the script editor\n # will not apppear to raise, but f will be unbound.\n # Catching and reraising gets around this.\n raise\n try:\n next(iterator)\n except StopIteration:\n return result\n raise StopIteration('Sequence has more than one item.')",
"def second(seq): # real signature unknown; restored from __doc__\n pass",
"def get_seq_aa(chain):\n aa_residues = []\n seq = \"\"\n\n for residue in chain.get_residues():\n aa = residue.get_resname()\n if not is_aa(aa) or not residue.has_id('CA'):\n continue\n elif aa == \"UNK\":\n seq += \"X\"\n aa_residues.append(residue)\n else:\n seq += Polypeptide.three_to_one(residue.get_resname())\n aa_residues.append(residue)\n\n return seq, aa_residues",
"def sequence():\n return project().molecule_list()[0].sequence.one_letter_codes",
"def _copy_sequence(self, sequence, name, item_types=None):\n try:\n result = tuple(sequence)\n except TypeError:\n raise TypeError(\"%s object must be a sequence.\" % name)\n\n if not item_types: return result\n\n invalid = [c for c in result if not isinstance(c, item_types)]\n if invalid:\n raise TypeError(\"%s object must contain only %s types.\"\n \" (Received %s)\" % (name, item_types, result))\n return result",
"def seq2chain(*args):\n if len(args) == 1:\n name = 'seq'\n seq = args[0]\n elif len(args) == 2:\n name = args[0]\n seq = args[1]\n else:\n raise ValueError(\"Give either name,seq or just seq\")\n \n return ImmuneChain(seq=Seq(seq,generic_dna),id=name)",
"def is_sequence(x):\n return isinstance(x, (tuple, list, set, numpy.ndarray))",
"def get_seq_and_anno(dataset: Literal[\"train\", \"test\"]) -> tuple[list[str], list[list[str]]]:\n with open(f\"{DEEPGO_DATA_PATH}/{dataset}_data.pkl\", \"rb\") as f:\n data = pickle.load(f)\n # filter to sequences below maximum length\n seqs = data[(data.sequences.str.len() <= MAX_LEN)][\"sequences\"].values.tolist()\n seq_annos = data[(data.sequences.str.len() <= MAX_LEN)][\"annotations\"].values.tolist()\n return seqs, seq_annos",
"def get_seq_list(seq_arg):\n\tpep_sequences = []\n\tfor inp in seq_arg:\n\t\tif '.' in inp:\n\t\t\t# If input is a file\n\t\t\tlis = readfile(inp) \n\t\t\tif len(lis) == 1:\n\t\t\t\t# If all sequences are listed horizontally on one line\n\t\t\t\t# rather than one per line, rearrange\n\t\t\t\tlis = lis[0].split()\n\n\t\t\tfor i in lis:\n\t\t\t\tpep_sequences.append(i.strip())\n\n\t\telse:\n\t\t\t# Sequence was typed directly into the argument\n\t\t\tpep_sequences.append(inp.strip())\n\n\t\tfor n, s in enumerate(pep_sequences):\n\t\t\tif len(s) == 5: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'CSMHL'\n\t\t\telif len(s) == 6: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'SMHL'\n\t\t\telse:\n\t\t\t\tassert len(s) == 11\n\n\treturn pep_sequences",
"def outcome_seq(self):\n seq = []\n for num, ch in enumerate(self.chains.keys()):\t\t\t\t#('A', 'B')\n resi_list = self.residues[num]\n s = list(self.pdb_seq[self.pdb_chains[ch][2]])\n# print(\"struct: \", self.seq[self.chains[ch][2]])##############################\n# print(\"seq : \", \"\".join(s))###########################\n# print(len(self.seq[self.chains[ch][2]]), len(s), len(resi_list))#########################\n if len(s) != len(self.seq[self.chains[ch][2]]):\n for aa in range(0, len(s)):\n if s[aa] != resi_list[aa][2]:\n print(\"ERROR: seq_from_struct + missing_resi differ from seq_from_header at position %s\" %aa)\n if resi_list[aa][4] == 'm':\n s[aa] = '-'\n seq.append(\"\".join(s))\n# print(\"out_s : \", seq[0])#######################################\n return seq"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The `state_size` property of `JointAttentionWrapper`.
|
def state_size(self):
return JointAttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
encoder_attention=self._attention_layer_size,
decoder_attention=self._cell.state_size,
decoder_states=[],
encoder_alignments=self._item_or_tuple(a.alignments_size for a in self._attention_mechanisms),
decoder_alignments=self._item_or_tuple(a.alignments_size for a in self._attention_mechanisms),
encoder_alignment_history=self._item_or_tuple(() for _ in self._attention_mechanisms),
decoder_alignment_history=self._item_or_tuple(() for _ in self._attention_mechanisms)
)
|
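`JointAttentionWrapperState` itself is not shown in this document; presumably it is a namedtuple-like container whose fields match the keyword arguments above. A hypothetical sketch of such a container and of reading one field from a populated size structure (field names mirror the call above, all sizes are made up):

import collections

# Hypothetical stand-in; the real class ships alongside JointAttentionWrapper.
JointAttentionWrapperState = collections.namedtuple(
    "JointAttentionWrapperState",
    ("cell_state", "time", "encoder_attention", "decoder_attention",
     "decoder_states", "encoder_alignments", "decoder_alignments",
     "encoder_alignment_history", "decoder_alignment_history"))

sizes = JointAttentionWrapperState(
    cell_state=128, time=(), encoder_attention=256, decoder_attention=128,
    decoder_states=[], encoder_alignments=40, decoder_alignments=40,
    encoder_alignment_history=(), decoder_alignment_history=())
assert sizes.encoder_attention == 256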
[
"def state_size(self):\n return self._state_size",
"def state_size(self):\n #############################################\n # TODO: YOUR CODE HERE #\n #############################################\n params = self.params\n return params[0]+params[1]\n \n #raise NotImplementedError('Please edit this function.')",
"def state_size(self):\n return ExtractronDecoderCellState(\n cell_state=self._cell._cell.state_size,\n time=tensor_shape.TensorShape([]),\n )",
"def state_dim(self) -> int:\n return self.backbone.state_dim",
"def get_state_shape(s):\n c = _concat(batch_size, s)\n size = tf.random_uniform(c, dtype=dtype)\n return size",
"def state_shape(self):\n pass",
"def info(self):\n return self.state_size, self.action_size",
"def state_shape(self, batch_size):\n return [self.num_layers * self.num_dirs, batch_size, self.num_units],",
"def model_size(self):\n return self._model_size",
"def sizing(self):\n return self.__sizing",
"def state_shape(self, batch_size):\n return ([self.num_layers * self.num_dirs, batch_size, self.num_units],\n [self.num_layers * self.num_dirs, batch_size, self.num_units])",
"def robots_state_length(self):\n self.lock.acquire()\n number = len(self.robots_state)\n self.lock.release()\n return number",
"def SizeOfStiffnessMatrix(self):\n\t\tpass",
"def rf_size(self):\n pass",
"def object_size(self):\n ret = self._get_attr(\"objectSize\")\n return ret",
"def validation_patch_size(self):\n return self._validation_patch_size",
"def size(self) -> Dict[str, int]:\n return __len__()",
"def number_of_states(self) -> int:\n return len(self.states)",
"def training_patch_size(self):\n return self._training_patch_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an initial (zero) state tuple for this `JointAttentionWrapper`. NOTE Please see the initializer documentation for details of how to call `zero_state` if using a `JointAttentionWrapper` with a `BeamSearchDecoder`.
|
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of JointAttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
return JointAttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
encoder_attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
# add decoder attention
decoder_attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
encoder_alignments=self._item_or_tuple(
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
decoder_alignments=self._item_or_tuple(array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32), 1,
dtype=dtype) for _ in self._attention_mechanisms),
decoder_states=self._item_or_tuple(
[array_ops.zeros((batch_size, self._cell.output_size))] for _ in self._attention_mechanisms),
encoder_alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(dtype=dtype, size=0,
dynamic_size=True)
if self._alignment_history else ()
for _ in self._attention_mechanisms),
decoder_alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(dtype=dtype, size=0,
dynamic_size=True)
if self._alignment_history else ()
for _ in self._attention_mechanisms),
)
|
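Stripped of the TensorFlow plumbing, `zero_state` assembles one zero (or one-hot) array per state field. A NumPy sketch of the shapes involved follows; the sizes and the subset of fields shown are illustrative, not the real implementation.

import numpy as np


def zero_state_sketch(batch_size, attention_size, cell_size, n_mechanisms=1):
    encoder_attention = np.zeros((batch_size, attention_size), dtype=np.float32)
    decoder_attention = np.zeros((batch_size, attention_size), dtype=np.float32)
    # Mirrors one_hot(zeros([batch]), depth=1): a (batch_size, 1) array of ones.
    decoder_alignments = tuple(
        np.eye(1, dtype=np.float32)[np.zeros(batch_size, dtype=np.int64)]
        for _ in range(n_mechanisms))
    decoder_states = tuple(
        [np.zeros((batch_size, cell_size), dtype=np.float32)]
        for _ in range(n_mechanisms))
    return encoder_attention, decoder_attention, decoder_alignments, decoder_states


enc, dec, aligns, states = zero_state_sketch(batch_size=4, attention_size=256,
                                             cell_size=128)
assert enc.shape == (4, 256) and aligns[0].shape == (4, 1)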
[
"def _get_zero_state(source_cell_fw, source_cell_bw):\n zero_fw = source_cell_fw.zero_state(self.batch_length, self.float_type)\n zero_bw = source_cell_bw.zero_state(self.batch_length, self.float_type)\n return zero_fw, zero_bw",
"def get_initial_state(self):\n raise NotImplementedError",
"def get_initial_state(self):\n return self._initial_state",
"def initial_sample_state(self, param_groups):\n return nest.map_structure(torch.zeros_like, param_groups)",
"def initial_state(self):\r\n return [None for _ in range(self.n_layers)]",
"def get_initial_state(lstm_layer_1,\n lstm_layer_2,\n batch_size):\n return lstm_layer_1.zero_state(batch_size,\n tf.float32), \\\n lstm_layer_2.zero_state(batch_size,\n tf.float32)",
"def create_default(cls) -> StateStats:\n return cls(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)",
"def create_default_question_state(cls):\n return state_domain.State.create_default_state(\n None, is_initial_state=True)",
"def restore_init_state(self):\n self.curr_mask_index = 0\n self.mask_count = 0",
"def get_initial_state_vector(self):\n\n return self.QVector[0]",
"def prepare_zero_state(self, qubit):\n\n program = Program()\n program += I(qubit)\n return program",
"def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n values = initializer(shape=(self.batch, self.units))\n\n return values",
"def init(self, state: 'SoState') -> \"void\":\n return _coin.SoNormalElement_init(self, state)",
"def initial_global_state(self):\n\n return ()",
"def get_starting_state(self):\n\t\treturn self._current_state # state 0",
"def init(self, state: 'SoState') -> \"void\":\n return _coin.SoNormalBindingElement_init(self, state)",
"def initialState(self):\n initialstate = {\"class\": self.__class__.__name__,\n \"name\": self.name,\n \"description\": self.description,\n \"space\": self.space.introduction(),\n \"gameobjects\": [obj.introduction()\n for obj in self.gameobjects\n ],\n \"teams\": [team.introduction()\n for team in self.teams\n ],\n \"isdiscrete\": self.isdiscrete,\n \"polls\": [poll.introduction()\n for poll in self.polls\n ],\n \"lastturn\": self.lastturn,\n \"turncount\": self.turncount\n }\n return initialstate",
"def initial_sample_state(self, param_groups):\n return np_nest.map_structure(np.zeros_like, param_groups)",
"def init(self, state: 'SoState') -> \"void\":\n return _coin.SoModelMatrixElement_init(self, state)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that the types of all inputs are the same
|
def assert_same_type(*inputs) -> bool:
first, *others = inputs
# single input
if not others:
return True
_class = type(first)
for ix, obj in enumerate(others):
if not isinstance(obj, _class):
            raise TypeError(f"Input types don't agree. This method accepts multiple inputs, "
                            f"type of the first input: {type(first)}, "
                            f"but type of input {ix + 2}: {type(obj)}")
return True
|
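With `assert_same_type` from the document above in scope, usage might look like this (a sketch, not part of the original module):

assert_same_type(1, 2, 3)        # all ints -> returns True
assert_same_type("only one")     # a single input always passes

try:
    assert_same_type(1.0, "oops", 3.0)
except TypeError as err:
    # The message names the first input's type and the offending input's position and type.
    print(err)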
[
"def test_equal_on_type_mismatch(self):\n a = payloads.DeriveKeyRequestPayload()\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)",
"def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")",
"def test_type_eq(self) -> None:\n self.assertTrue(self.type1 == self.type1copy)\n\n # pylint: disable=unnecessary-dunder-call\n self.assertTrue(self.type1.__eq__(self.type1copy))",
"def test_equal_on_type_mismatch(self):\n a = payloads.DeriveKeyResponsePayload()\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)",
"def test_inputs(self):\n r = Result('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")",
"def verify(t1, t2):\n if not isinstance(t1, coretypes.Mono) or not isinstance(t2, coretypes.Mono):\n if t1 != t2:\n raise UnificationError(\"%s != %s\" % (t1, t2))\n return\n\n args1, args2 = t1.parameters, t2.parameters\n tcon1, tcon2 = type_constructor(t1), type_constructor(t2)\n\n if tcon1 != tcon2:\n raise UnificationError(\n \"Got differing type constructors %s and %s\" % (tcon1, tcon2))\n\n if len(args1) != len(args2):\n raise UnificationError(\"%s got %d and %d arguments\" % (\n tcon1, len(args1), len(args2)))",
"def test_assertSameItems(self):\n x = 0\n y = 'abcdef'\n z = 3\n y1 = 'abc' + 'def'\n z1 = 3.0\n \n y_id = id(y)\n z_id = id(z)\n y1_id = id(y1)\n z1_id = id(z1)\n \n self.assertSameItems([x,y,z], [x,y,z])\n self.assertSameItems([x,y,z], [z,x,y])\n self.assertSameItems('', '')\n self.assertSameItems([x,y,z], (x,y,z))\n \n try:\n self.assertSameItems([x,y,z], [x,y,z,y])\n except:\n message = str(exc_info()[1])\n self.assertEqual(message,\n 'Observed and expected are different lengths: 3 and 4')\n else:\n raise AssertionError, \\\n \"unit_test.assertSameItems failed on input %s and %s\" \\\n % (`[x,y,z]`, `[x,y,z,y]`)\n\n try:\n first_list = [x,y,z]\n second_list = [y,x,z1]\n self.assertSameItems(first_list, second_list)\n except self.failureException:\n pass\n else:\n raise AssertionError, \\\n \"unit_test.assertEqualItems failed on input %s and %s\" \\\n % (`[x,y,z]`, `[y,x,z1]`)\n \n # assert y is not y1\n # try:\n # self.assertSameItems([y], (y1,))\n # except self.failureException:\n # pass\n # else:\n # raise AssertionError, \\\n # \"unit_test.assertEqualItems failed on input %s and %s\" \\\n # % (`[y]`, `(y1,)`)",
"def test_equal(self):\n composerA = Composer(TextType(\"Henry Mancini\"))\n composerB = Composer(TextType(\"Jerry Goldsmith\"))\n composerC = Composer(TextType(\"Henry Mancini\"))\n\n self.assertNotEqual(composerA, composerB)\n self.assertNotEqual(composerB, composerC)\n self.assertEqual(composerA, composerC)\n # wrong type\n self.assertFalse(composerA.__eq__(1234))",
"def test_any_type(self):\n\n @typecheck(int, None)\n def to_string(x, y):\n x = y\n return str(x)\n\n try:\n to_string(1, 9)\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have, given the first argument has the correct type and no type check should be performed on the second argument.\")",
"def check_identical_to(self, other: 'Type') -> None:\n if not self.is_identical_to(other):\n raise TypesNotIdenticalError(self, other)",
"def test_check_types(self):\n self.assertTrue(attributes.AnyAttr.check_type(''))\n self.assertTrue(attributes.AnyAttr.check_type(2))\n self.assertTrue(attributes.AnyAttr.check_type(2.))\n self.assertTrue(attributes.AnyAttr.check_type(()))",
"def assertTypedTupleEquals(self, expected, actual):\n self.assertTupleEqual(expected, actual)\n self.assertListEqual(list(map(type, expected)), list(map(type, actual)))",
"def __validate(type1, type2):\n if not isinstance(type1, type2):\n raise ExchangeError('Type mismatch {}'.format((type1, type2)))",
"def assert_is_similar(cls, expected, inferred):\n\n ERROR_URL_298 = \"https://github.com/opendp/opendp/discussions/298\"\n if isinstance(inferred, UnknownType):\n return\n if isinstance(expected, str) and isinstance(inferred, str):\n if inferred in ATOM_EQUIVALENCE_CLASSES:\n assert expected in ATOM_EQUIVALENCE_CLASSES[inferred], \\\n f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\"\n else:\n assert expected == inferred, \\\n f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\"\n\n elif isinstance(expected, RuntimeType) and isinstance(inferred, RuntimeType):\n # allow extra flexibility around options, as the inferred type of an Option::<T>::Some will just be T\n if expected.origin == \"Option\" and inferred.origin != \"Option\":\n expected = expected.args[0]\n\n assert expected.origin == inferred.origin, \\\n f\"inferred type is {inferred.origin}, expected {expected.origin}. See {ERROR_URL_298}\"\n\n assert len(expected.args) == len(inferred.args), \\\n f\"inferred type has {len(inferred.args)} arg(s), expected {len(expected.args)} arg(s). See {ERROR_URL_298}\"\n\n for (arg_par, arg_inf) in zip(expected.args, inferred.args):\n RuntimeType.assert_is_similar(arg_par, arg_inf)\n else:\n # inferred type differs in structure\n raise AssertionError(f\"inferred type is {inferred}, expected {expected}. See {ERROR_URL_298}\")",
"def test_inputs(self):\n\n assert False",
"def type_type_consistent(type_a: Type, type_b: Type) -> bool:\n return type_a == type_b",
"def raise_type_mismatch(ob, expected, **kwargs):\n e = 'Object not of expected type:'\n e += '\\n expected: %s' % str(expected)\n e += '\\n obtained: %s' % str(type(ob))\n e += '\\n' + indent(format_obs(kwargs), ' ')\n raise ValueError(e)",
"def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'val', 'hash'),\n WorkInput('c', '1', 'xxxx', 'val', 'hash'),\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ), \"Should convert all arguments to a WorkInput\")",
"def _assert_validity_of_inputs(self):\n for item in [\"frequency\", \"Dt\"]:\n if isinstance(self.__getattribute__(item), bool):\n raise TypeError(f\"Parameter '{item}' must be numeric.\")\n if not isinstance(self.__getattribute__(item), (int, float)):\n raise TypeError(f\"Parameter '{item}' is not a non-zero number.\")\n if self.__getattribute__(item) <= 0.0:\n raise ValueError(f\"Parameter '{item}' must be a non-zero number.\")\n for item in ['q0', 'P', 'R']:\n if self.__getattribute__(item) is not None:\n if isinstance(self.__getattribute__(item), bool):\n raise TypeError(f\"Parameter '{item}' must be an array of numeric values.\")\n if not isinstance(self.__getattribute__(item), (list, tuple, np.ndarray)):\n raise TypeError(f\"Parameter '{item}' is not an array. Got {type(self.__getattribute__(item))}.\")\n self.__setattr__(item, np.copy(self.__getattribute__(item)))\n if self.q0 is not None:\n if self.q0.shape != (4,):\n raise ValueError(f\"Parameter 'q0' must be an array of shape (4,). It is {self.q0.shape}.\")\n if not np.allclose(np.linalg.norm(self.q0), 1.0):\n raise ValueError(f\"Parameter 'q0' must be a versor (norm equal to 1.0). Its norm is equal to {np.linalg.norm(self.q0)}.\")\n for item in ['P', 'R']:\n if self.__getattribute__(item).ndim != 2:\n raise ValueError(f\"Parameter '{item}' must be a 2-dimensional array.\")\n m, n = self.__getattribute__(item).shape\n if m != n:\n raise ValueError(f\"Parameter '{item}' must be a square matrix. It is {m}x{n}.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Change or add a user portrait
|
def change_user_portrait(user_id):
user = User.query.filter(User.user_id == user_id).first()
if not user:
return render_template("404.html"), 404
if request.method == "POST":
if "file" not in request.files:
flash("No file part")
return redirect(request.url)
file = request.files["file"]
if file.filename == "":
flash("No selected file")
return redirect(request.url)
        if change_portrait(user_id, file) != "Success":
flash("Bad file. Only jpg allowed.")
return redirect(request.url)
return redirect(f"/{user_id}")
return render_template("change_user_portrait.html")
|
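A hedged sketch of exercising this view with Flask's test client; `app`, the URL rule, and the `change_portrait` helper live elsewhere in the application, so the route path below is a hypothetical placeholder.

import io


def upload_portrait(app, user_id, jpg_bytes):
    # POST a multipart form whose "file" field carries the image, as the view expects.
    client = app.test_client()
    data = {"file": (io.BytesIO(jpg_bytes), "portrait.jpg")}
    return client.post(f"/{user_id}/portrait",           # hypothetical route path
                       data=data,
                       content_type="multipart/form-data",
                       follow_redirects=True)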
[
"def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width",
"def yourportrait():\n\n return render_template(\n 'your_portrait.html',\n your_portraitActive='active'\n )",
"def change_orientation_portrait(self):\n\n self.mob_conn.orientation = 'PORTRAIT'\n return self",
"def set_landscape_parameters(self, landscape, params):",
"def change_user_type(self):\n user_origin = CustomUser.objects.get(id=self.id)\n if user_origin.user_type != self.user_type:\n ChildProfile.objects.filter(user=self).delete()\n ParentProfile.objects.filter(user=self).delete()\n InstructorProfile.objects.filter(user=self).delete()\n if user_origin.user_type == 1:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 2:\n if self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 3:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 4:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)",
"def set_user_role(data):\n g.is_editor = data == \"editor\"",
"def is_portrait(self) -> bool:\n\n\t\treturn self.width < self.height",
"def to_landscape(self) -> None:\n if self.is_portrait:\n self.width, self.height = self.height, self.width",
"def setUpOrientation(node, parmname, defaultup):\n pass",
"def set_orientation(self, value):\n self._selenium_web_driver().orientation = value.upper()",
"def is_portrait(self) -> bool:\n return self.width <= self.height",
"def adjustRotation(self, photo):\n if photo.exif.orientation not in (0, 1):\n # There is somthing to do\n if photo.exif.orientation == 6:\n # rotate 90° clockwise\n # AND LOOSE EXIF DATA\n self.rotatephoto(photo, -90)\n if photo.exif.orientation == 8:\n # rotate 90° counterclockwise\n # AND LOOSE EXIF DATA\n self.rotatephoto(photo, 90)",
"def test_portrait_check():\n portrait_angles = [90, 270, -90]\n landscape_angles = [0, 180, -180, 360]\n\n for angle in portrait_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_portrait_frame()\n assert not compass.is_landscape_frame()\n\n for angle in landscape_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_landscape_frame()\n assert not compass.is_portrait_frame()",
"def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return",
"def changeorient(player, touche):\n\n\tif touche == \"d\": # rotation a droite\n\t\t\tplayer[1] = (player[1] + 10)%360 \n\t\t\n\telif touche == \"g\": # rotation a gauche \n\t\tplayer[1] = (player[1] - 10)%360",
"def set_perms(self, request, *args, **kwargs):\n self.user = request.user\n if isinstance(self.user, User):\n self.user = self.user.child\n self.is_student_user = self.user.groups.filter(name=\"students\").exists()\n self.is_teacher_user = self.user.groups.filter(name=\"teachers\").exists()\n self.is_admin_user = self.user.groups.filter(name=\"odalc_admins\").exists()\n else:\n self.is_student_user = False\n self.is_teacher_user = False\n self.is_admin_user = False",
"def is_portrait(self):\n pj = self.kna[0] - self.krb[0] # panjang\n lb = self.kna[1] - self.krb[1] # lebar\n if pj < lb:\n return True\n else:\n return False",
"def test_write_page_setup_portrait(self):\n\n self.worksheet.set_portrait()\n\n self.worksheet._write_page_setup()\n\n exp = \"\"\"<pageSetup orientation=\"portrait\"/>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)",
"def decide_orientation(width, height):\n if width < height:\n return Orientation.HORIZONTAL\n elif width > height:\n return Orientation.VERTICAL\n else:\n return Orientation.HORIZONTAL if random.randint(0, 1) == 0 else Orientation.VERTICAL"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes how much slack time the bus can use assuming its next checkpoint is `nxt_chk_id`. This is based on the formula in the MAST paper.
|
def usable_slack_time(self, t, nxt_chk_id, chkpts):
init_slack = self.init_slack_times[nxt_chk_id]
avail_slack = self.avail_slack_times[nxt_chk_id]
next_chk = chkpts[nxt_chk_id]
prev_chk = chkpts[nxt_chk_id - 1]
t_now = t - self.start_t
if t_now < prev_chk.dep_t:
return min(avail_slack, init_slack * cf.MIN_INIT_SLACK)
elif t_now > next_chk.dep_t:
return 0
# just straight from the MAST paper
# essentially a fraction based on how
# close to the next checkpoint we are
usable_slack = init_slack * (1 + (cf.MIN_INIT_SLACK - 1) * (1 - ((t_now - prev_chk.dep_t) / (chkpts[1].dep_t))))
return min(avail_slack, usable_slack)
|
[
"def AvgOneWaitTime(self, chk, rng=5):\n tot = 0\n count = 0\n\n for num in range(rng):\n try:\n tot += int(self.CheckpointWaitTimes[chk-1][\"WaitTimes\"][num][\"WaitTime\"])\n count += 1\n except:\n # print \"Not enough wait times reported by TSA for that checkpoint\"\n pass\n # Multiply TSA values by 10 minute increments, in seconds (10*60=600)\n newTot = math.ceil(tot / float(count)) * 600\n return int(newTot)",
"def fraction_time_waiting_on_shuffle_read(self):\n total_fetch_wait = 0\n # This is just used as a sanity check: total_runtime_no_shuffle_read + total_fetch_wait\n # should equal total_runtime.\n total_runtime_no_shuffle_read = 0\n total_runtime = 0\n for id, stage in self.stages.iteritems():\n total_fetch_wait += stage.total_fetch_wait()\n total_runtime_no_shuffle_read += stage.total_runtime_no_remote_shuffle_read()\n total_runtime += stage.total_runtime()\n assert(total_runtime == total_fetch_wait + total_runtime_no_shuffle_read)\n return total_fetch_wait * 1.0 / total_runtime",
"def compute_weak_subjectivity_period(N: uint64, t: Ether) -> uint64:\n ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY\n # N = len(get_active_validator_indices(state, get_current_epoch(state)))\n # t = get_total_active_balance(state) // N // ETH_TO_GWEI\n T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI\n delta = get_validator_churn_limit(N)\n MAX_DEPOSITS = 1024\n Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH\n D = SAFETY_DECAY\n\n if T * (200 + 3 * D) < t * (200 + 12 * D):\n epochs_for_validator_set_churn = (\n N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))\n )\n epochs_for_balance_top_ups = (\n N * (200 + 3 * D) // (600 * Delta)\n )\n ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)\n else:\n ws_period += (\n 3 * N * D * t // (200 * Delta * (T - t))\n )\n \n return ws_period",
"def OneCheckpointWaitTimes(self, chk):\n return self.CheckpointWaitTimes[chk-1]",
"def calc_thrust_power(block_count):\n return block_count / 0.03",
"def slowestWaitTimeNow(self, rng=5):\n numCheckpoints = self.TSAairportDict[self.airportCode].numCheckpoints()\n try:\n slowestTime = self.CheckpointWaitTimes[0][\"WaitTimes\"][0][\"WaitTime\"]\n for checkpoint in range(numCheckpoints):\n if self.CheckpointWaitTimes[checkpoint][\"WaitTimes\"][0][\"WaitTime\"] > slowestTime:\n slowestTime = self.CheckpointWaitTimes[checkpoint][\"WaitTimes\"][0][\"WaitTime\"]\n except:\n # print \"one or more checkpoints did not have any wait times reported\"\n slowestTime = 0\n return int(slowestTime) * 600",
"def _seconds_since_check(self) -> int:\n if self.update_time is None:\n return 0\n return int(time.time()) - self.update_time",
"def timeRequiredToBuy(self, tickets: List[int], k: int) -> int:\n ret = 0\n while tickets[k] > 0:\n for i in range(len(tickets)):\n if tickets[i] > 0:\n tickets[i] -= 1\n ret += 1\n if i == k and tickets[k] == 0:\n break\n return ret",
"def estimate_number_of_MC_steps_left( self ):\n return self.wl_sim.Nbins/self.fmin - self.wl_sim.iter",
"def _calc_sleep_time(self, retry_index):\n # If we have already passed the retry index that would return the max timeout\n # then there is no reason to calculate the timeout.\n if self._max_retry_index is not None and self._max_retry_index <= self._retry_count:\n return self._max_retry_time\n\n # Calculate the timeout\n else:\n timeout = self._retry_coeff * math.factorial(retry_index)\n if timeout >= self._max_retry_time:\n self._max_retry_index = retry_index + 1\n return self._max_retry_time\n else:\n return timeout",
"def calc_crack_width(self):\n k_t = 0.6 # no unit\n E_p = 195e3 # [MPa = N/mm2]\n A_p = 1000 # [mm**2] ###10000 #?? # 4.5m x 0.3m = 1.35m2 ?\n A_pn = (1. - self.nr_broken / self.nr_wires) * A_p\n cw = (1. - k_t) * self.stress_range ** 2 * A_pn\n cw = cw / ( 0.72 * np.pi * self.fctm * E_p * np.sqrt(A_pn))\n return cw #[mm]",
"def warmup(self):\n\t\treturn int(self._warmup/self.tick_period) * self.tick_period",
"def calc_time_cost(path: List[State]) -> int:\n return path[-1].time_step - path[0].time_step",
"def calculateKmsLeft(self):\n\n return int(self.getVehicleAutonomy()) - int(self.getVehicleKmsDone())",
"def determineThrottle(self, state):\n\n eps = self.epsilon\n n = 1 if state.size == 14 else state.shape[1]\n\n throttle = np.zeros(n)\n S = self.switchingFunction(state)\n S = S.reshape(n)\n\n for i, s in enumerate(S):\n if eps > 0:\n midthrottle = (eps - s) / (2 * eps)\n throttle[i] = 0 if s > eps else 1 if s < -eps else midthrottle\n else:\n throttle[i] = 0 if s > eps else 1\n\n return throttle",
"def calc_sleep(self, attempt):\n return min(10, pow(2, attempt))",
"def getNrTimesteps():\n\n timesteps = 25\n return timesteps",
"def n_timesteps(self) -> int:\n if self.total_time < self.timestep:\n warnings.warn(\n f\"No simulation possible: you asked for {self.total_time} \"\n f\"simulation time but the timestep is {self.timestep}\"\n )\n return floor(self.total_time.total_seconds() / self.timestep.total_seconds())",
"def calculate_time_left_to_spare_on_loss_of_view(self, trial_detail):\n\t\t\n\t\t# Get the time the participant last entered the road, and the closest car at that time\n\t\tlast_entered_road = trial_detail.get_moment_last_entered_road()\n\t\tif not last_entered_road:\n\t\t\treturn NO_VALUE_NUM\n\t\t\n\t\tt_last_entered_road = last_entered_road.get_time()\n\t\tnext_car_to_participant = trial_detail.get_closest_car(t_last_entered_road, Direction.RIGHT)\t\n\t\tparticipant = trial_detail.get_participant()\n\t\t\n\t\t# loop backwards until the next_car becomes visible\n\t\tt = t_last_entered_road\n\t\twhile not self._is_car_visible(next_car_to_participant, participant, t):\n\t\t\t\n\t\t\tprev_moment = participant.get_prev_moment(t)\n\t\t\tif not prev_moment:\n\t\t\t\treturn NO_VALUE_NUM \n\t\t\tt = prev_moment.get_time()\n\t\t\t\n\t\t\tclosest_car = trial_detail.get_closest_car(t, Direction.RIGHT)\n\t\t\tif closest_car is not next_car_to_participant:\n\t\t\t\treturn -NO_VALUE_NUM # negative \"infinite\" because they never saw the car\n\t\t\t\t\n\t\t# was the car ever out of view?\n\t\tif self._approx_equal(t_last_entered_road, t, delta=0.001):\n\t\t\treturn NO_VALUE_NUM # positive \"infinite\" because they saw the car as they entered\n\t\t\n\t\t# when would the car arrive at the participant?\n\t\ttls_on_loss_of_view = next_car_to_participant.get_time_from_x(t, last_entered_road.get_x_position())\n\t\treturn tls_on_loss_of_view"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given distributed arrays with the lengths and offsets of groups in an array of particle IDs, compute the group index corresponding to each particle ID.
|
def group_index_from_length_and_offset(length, offset, nr_local_ids,
return_rank=False, comm=None):
if comm is None:
from mpi4py import MPI
comm = MPI.COMM_WORLD
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()
# Ensure lengths and offsets are signed, 64 bit ints -
# prevents numpy casting to float when mixing signed and unsigned.
length = np.asarray(length, dtype=np.int64)
offset = np.asarray(offset, dtype=np.int64)
# Compute index of each group stored locally
nr_groups_local = len(length)
index_offset = comm.scan(nr_groups_local) - nr_groups_local
index = np.arange(nr_groups_local, dtype=np.int64) + index_offset
# Find range of particle IDs stored on each rank
first_id_offset_local = comm.scan(nr_local_ids) - nr_local_ids
first_id_offset = comm.allgather(first_id_offset_local)
last_id_offset_local = comm.scan(nr_local_ids) - 1
last_id_offset = comm.allgather(last_id_offset_local)
# Find the range of ranks we need to send each group's length, offset and index
rank_send_offset = -np.ones(comm_size, dtype=int)
rank_send_count = np.zeros(comm_size, dtype=int)
first_rank_to_send_group_to = 0
last_rank_to_send_group_to = -1
for i in range(nr_groups_local):
# Find first rank this group should be sent to
while first_rank_to_send_group_to < comm_size-1 and last_id_offset[first_rank_to_send_group_to] < offset[i]:
first_rank_to_send_group_to += 1
# Find last rank this group should be sent to
while last_rank_to_send_group_to < comm_size-1 and first_id_offset[last_rank_to_send_group_to+1] < offset[i]+length[i]:
last_rank_to_send_group_to += 1
# Accumulate number of groups to send to each rank
for dest in range(first_rank_to_send_group_to, last_rank_to_send_group_to+1):
if rank_send_offset[dest] < 0:
rank_send_offset[dest] = i
rank_send_count[dest] += 1
# Find number of groups to receive on each rank and offset into receive buffers
rank_recv_count = np.empty_like(rank_send_count)
comm.Alltoall(rank_send_count, rank_recv_count)
rank_recv_offset = np.cumsum(rank_recv_count) - rank_recv_count
# Construct receive buffers
nr_recv = np.sum(rank_recv_count)
length_recv = np.ndarray(nr_recv, dtype=length.dtype)
offset_recv = np.ndarray(nr_recv, dtype=offset.dtype)
index_recv = np.ndarray(nr_recv, dtype=index.dtype)
# Exchange group lengths, offsets and indexes
ps.my_alltoallv(length, rank_send_count, rank_send_offset,
length_recv, rank_recv_count, rank_recv_offset,
comm=comm)
ps.my_alltoallv(offset, rank_send_count, rank_send_offset,
offset_recv, rank_recv_count, rank_recv_offset,
comm=comm)
ps.my_alltoallv(index, rank_send_count, rank_send_offset,
index_recv, rank_recv_count, rank_recv_offset,
comm=comm)
# Find number of particles on previous MPI ranks
nr_ids_prev = comm.scan(nr_local_ids) - nr_local_ids
# Allocate output arrays
grnr = -np.ones(nr_local_ids, dtype=np.int32)
if return_rank:
rank = -np.ones(nr_local_ids, dtype=np.int32)
# Convert received offsets to local array indexes
i1 = offset_recv - nr_ids_prev
i2 = offset_recv + length_recv - nr_ids_prev
# Negative start index i1 indicates that some particles in the group are on a previous MPI rank
if return_rank:
rank_offset = np.where(i1 < 0, np.abs(i1), 0)
# Ensure all local array indexes are in range
i1[i1 < 0] = 0
i2[i2 > nr_local_ids] = nr_local_ids
# Assign group indexes to local particles
for ind, start, end in zip(index_recv, i1, i2):
if end > start:
grnr[start:end] = ind
# Compute rank of each local particle within its group
if return_rank:
for offset, start, end, num in zip(rank_offset, i1, i2, i2-i1):
if num > 0:
rank[start:end] = np.arange(offset, offset+num, dtype=rank.dtype)
# Return the results
if return_rank:
return grnr, rank
else:
return grnr
|
[
"def get_group_indices(groups_list,group_number):\r\n file = open(groups_list, 'r')\r\n lines=file.readlines()\r\n group_indices = np.fromstring(lines[group_number],sep=\"\\t\", dtype = int)\r\n\r\n return group_indices",
"def get_group_index_lists(group_ids):\n groups = list(set(group_ids))\n r = []\n for group in groups:\n l = []\n for i in range(len(group_ids)):\n if group_ids[i] == group:\n l.append(i)\n r.append(l)\n return r",
"def group_idx(self, x):\n centers = self.centers\n dist = [self.dist_func(x, center) for center in centers]\n dist = np.array(dist)\n group = np.argmin(dist)\n return group",
"def get_group_indices(self, groups='all'):\n\n if self.group_edges is None:\n msg = 'Unable to get energy group indices for groups \"{0}\" since ' \\\n 'the group edges have not yet been set'.format(groups)\n raise ValueError(msg)\n\n if groups == 'all':\n return np.arange(self.num_groups)\n else:\n indices = np.zeros(len(groups), dtype=np.int)\n\n for i, group in enumerate(groups):\n cv.check_greater_than('group', group, 0)\n cv.check_less_than('group', group, self.num_groups, equality=True)\n indices[i] = group - 1\n\n return indices",
"def group_indices(indexes):\n\n diff_inds = np.where(np.abs(np.diff(indexes)) > 1)[0]\n diff_points = np.concatenate(([-1], diff_inds, [len(indexes) - 1]))\n length = diff_points.size\n pairs = np.hstack((diff_points[:-1].reshape(length - 1, 1) + 1, diff_points[1:].reshape(length - 1, 1)))\n # pairs = zip(diff_points[::]+1, diff_points[1::])\n segments = indexes[pairs]\n return np.array(segments)",
"def indices_groups(self):\n if self._indices_groups is None:\n indices = []\n for idx, parameter in enumerate(self.parameters_ordered):\n if isinstance(parameter, ParameterGroup):\n for j in range(len(parameter.key)):\n indices.append(idx)\n else:\n indices.append(idx)\n\n self._indices_groups = np.array(indices, dtype=np.int64)\n # self._indices_groups = tuple(indices)\n\n return self._indices_groups",
"def create_group_index(groups):\n index = {}\n for group in groups:\n for phrase in group:\n index[phrase] = '|'.join(group)\n return index",
"def indicesByPdgId(self,pdgIds,useAbs=True,indices=None):\n result = [ ]\n if type(pdgIds)==type(0):\n pdgIds_ = [ pdgIds ]\n else:\n pdgIds_ = pdgIds\n parts = self.genParts\n if indices!=None:\n parts = [ self.genParts[i] for i in indices ]\n for mp in parts:\n id = mp.particle.pdgId()\n if useAbs:\n id = abs(id)\n if id in pdgIds_:\n result.append(mp.index)\n return result",
"def node_assignment_group(group_ids, batch_ids):\n # Loop over on edges, reset the group IDs of connected node\n joined = np.vstack((group_ids, batch_ids))\n _, unique_ids = np.unique(joined, axis=1, return_inverse=True)\n return unique_ids",
"def _groupindex(self):\n\n # find the max. length of the parameters\n maxdict = {key: len(str(max(val))) for key, val in\n self.param_dyn_dict.items()}\n # find the max. length of the parameters\n\n def doit(x, N):\n return str(x).zfill(N)\n for i, [key, val] in enumerate(self.param_dyn_dict.items()):\n if i == 0:\n conclist = list(map(partial(doit, N=maxdict[key]), val))\n else:\n conclist = map(add, conclist,\n map(partial(doit, N=maxdict[key]), val))\n return np.array(list(map(int, conclist)))",
"def group_point(data: tf.Tensor, indices: tf.Tensor) -> tf.Tensor:\n\n return grouping_module.group_point(data, indices)",
"def _create_group_to_col_position(column_groups):\n if column_groups is not None:\n group_to_col_index = {group: [] for group in list(set(column_groups))}\n for i, group in enumerate(column_groups):\n group_to_col_index[group].append(i)\n else:\n group_to_col_index = None\n return group_to_col_index",
"def get_group_indexes(indexes: Tensor) -> List[Tensor]:\n\n res: dict = {}\n for i, _id in enumerate(indexes):\n _id = _id.item()\n if _id in res:\n res[_id] += [i]\n else:\n res[_id] = [i]\n\n return [tensor(x, dtype=torch.long) for x in res.values()]",
"def group_data_idx_by_image_idx(image_ids_in_caption_data_order):\n image_positions_by_image_id = {}\n for i, img_id in enumerate(image_ids_in_caption_data_order):\n if img_id in image_positions_by_image_id:\n image_positions_by_image_id[img_id].append(i)\n else:\n image_positions_by_image_id[img_id] = [i]\n return image_positions_by_image_id",
"def find_group(self, star, starlist):\n star_distance = np.hypot(star['x_0'] - starlist['x_0'],\n star['y_0'] - starlist['y_0'])\n distance_criteria = star_distance < self.crit_separation\n return np.asarray(starlist[distance_criteria]['id'])",
"def indexing_to_chunk_indices(output_chunk):\n input_indices = [] # index in the chunk of the mutable tensor\n value_indices = [] # index in the chunk of the assigned value\n for d, s in zip(output_chunk.op.indexes, output_chunk.op.inputs[0].shape):\n # expand the index (slice)\n idx = np.r_[slice(*d.indices(s)) if isinstance(d, slice) else d]\n input_indices.append(idx)\n if not isinstance(d, Integral):\n value_indices.append(np.arange(len(idx)))\n return input_indices, value_indices",
"def compute_voxel_group(tensor, group_id):\n assert group_id >= 1 and group_id <= 8\n group_id -= 1\n begin = [0, group_id / 4, group_id / 2 % 2, group_id % 2, 0]\n stride = [1, 2, 2, 2, 1]\n\n dim = len(tensor.shape)\n if dim == 3:\n begin = begin[1:4]\n stride = stride[1:4]\n elif dim == 4:\n begin = begin[:-1]\n stride = stride[:-1]\n\n return tf.strided_slice(tensor, begin, tensor.shape, stride)",
"def points_in_group(group, binnums):\n return np.concatenate(([points_in_bin(binn, binnums) for binn in\n group]))",
"def get_group_idx(self) -> int:\n return self.group_idx"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a key tuple and returns a key tuple without the primary key but with the key digest
|
def get_key_with_digest_only(key):
return (key[0], key[1], None, key[3])
|
[
"def remove_by_hash(hashval: str) -> None:\n key_details = get_keys()\n with authorized_keys(\"w\") as ak:\n for keyhash, key in key_details:\n if keyhash != hashval:\n ak.write(f\"{key}\\n\")\n break\n else:\n raise KeyError(hashval)",
"def test_pos_remove_bin_with_policy_key_digest(self, put_data):\n key = (\"test\", \"demo\", None, bytearray(\"asd;as[d'as;djk;uyfl\", \"utf-8\"))\n record = {\"age\": 1, \"name\": \"name1\"}\n\n put_data(self.as_connection, key, record)\n policy = {\"key\": aerospike.POLICY_KEY_DIGEST}\n self.as_connection.remove_bin(key, [\"age\"], {}, policy)\n\n (key, _, bins) = self.as_connection.get(key)\n\n del record[\"age\"]\n assert bins == record\n assert key == (\"test\", \"demo\", None, bytearray(b\"asd;as[d'as;djk;uyfl\"))",
"def __delitem__(self, key: Union[Any, Sequence[Any]]) -> None:\n self.contents = {\n i: self.contents[i] \n for i in self.contents if i not in more_itertools.always_iterable(key)}\n return self",
"def drop_password_key(data):\n if not isinstance(data, dict):\n return\n\n for key in data.keys():\n if key in ENCRYPT_LIST:\n del data[key]\n elif data[key] and isinstance(data[key], dict):\n drop_password_key(data[key])",
"def _wipe_key(self):\n\n try:\n del self.aes_key\n except AttributeError as exc:\n pass",
"def remove_password_from_record(record: Dict[str, str]) -> Dict[str, str]:\n if \"password\" in record.keys():\n del record[\"password\"]\n return record",
"def test_key_deletion(self):\n pass",
"def _wipe_key(self):\n\t\ttry:\n\t\t\tdel self.aes_key\n\t\texcept AttributeError:\n\t\t\tpass",
"def testStripKeys(self):\n skeys = ['_id']\n expect = {'pileupId': 1}\n pdict = {'pileupId': 1, '_id': 1}\n pdict = stripKeys(pdict, skeys)\n self.assertDictEqual(pdict, expect)\n\n pdict = {'pileupId': 1, '_id': 1}\n results = [pdict]\n results = stripKeys(results, skeys)\n self.assertDictEqual(pdict, expect)",
"def normalize_key(key: Any):",
"def strip_key(key):\n return key.replace(REPLACEMENT_DELIMITER, \"\")",
"def test_pos_remove_with_policy_key_digest(self):\n\n key = (\"test\", \"demo\", None, bytearray(\"asd;as[d'as;djk;uyfl\", \"utf-8\"))\n meta = {\"gen\": 0}\n policy = {\"retry\": aerospike.POLICY_RETRY_ONCE, \"key\": aerospike.POLICY_KEY_DIGEST}\n retobj = self.as_connection.put(key, policy)\n\n assert retobj == 0\n\n retobj = self.as_connection.remove(key, meta, policy)\n\n assert retobj == 0\n\n with pytest.raises(e.RecordNotFound) as exception:\n (key, meta, _) = self.as_connection.get(key)\n\n (code, msg, _, _) = exception.value\n assert msg == \"AEROSPIKE_ERR_RECORD_NOT_FOUND\"\n assert code == 2",
"def unload(self, key):\n assert isinstance(key, PGPKey)\n pkid = id(key)\n if pkid in self._keys:\n # remove references\n [ kd.remove(pkid) for kd in [self._pubkeys, self._privkeys] if pkid in kd ]\n # remove the key\n self._keys.pop(pkid)\n\n # remove aliases\n for m, a in [ (m, a) for m in self._aliases for a, p in m.items() if p == pkid ]:\n m.pop(a)\n # do a re-sort of this alias if it was not unique\n if a in self:\n self._sort_alias(a)\n\n # if key is a primary key, unload its subkeys as well\n if key.is_primary:\n [ self.unload(sk) for sk in key.subkeys.values() ]",
"def remove_item(self, key, value):\n ...",
"def __remove_reference(self, key, transaction, txn):\n if isinstance(key, tuple):\n #create a byte string key, first validity check in python 3!\n for val in key:\n if sys.version_info[0] >= 3 and isinstance(val, bytes):\n raise DbError(_('An attempt is made to save a reference key '\n 'which is partly bytecode, this is not allowed.\\n'\n 'Key is %s') % str(key))\n key = str(key)\n if isinstance(key, UNITYPE):\n key = key.encode('utf-8')\n if not self.readonly:\n if not transaction.batch:\n old_data = self.reference_map.get(key, txn=txn)\n transaction.add(REFERENCE_KEY, TXNDEL, key, old_data, None)\n #transaction.reference_del.append(str(key))\n self.reference_map.delete(key, txn=txn)",
"def DelKey(self, *args):\n return _snap.TIntFltH_DelKey(self, *args)",
"def rem(pair,pairs):\n remove = []\n for key in pairs: # find entries that contain common indices\n if not set(pair).isdisjoint(key):\n remove += [key]\n for k in remove: # remove entries\n del pairs[k]\n return pairs",
"def DelKey(self, *args):\n return _snap.TIntPrFltH_DelKey(self, *args)",
"def delete_key(self, table, key, topic=None, sync=True):\n t_entries = self._db.table(table)\n t_entries.remove(Query().key == key)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For every node in the tree, check whether it is unique compared to previously visited nodes. If it is NOT unique, merge it with the node it is being checked against; if it is unique, add it to the list of unique nodes.
|
def greedy(startNode: Node, unique = []):
if (len(unique) == 0):
unique.append(startNode)
for root in unique:
if root.children: # Check if given root has children
for child in root.children: # Check if any children can merge with the current uniques
isUnique = True # Becomes false if a node is able to merge
for node in unique: # Test if any children can merge with any of the unique nodes
if match_labels(child, node): # Match labels between two nodes
merge_states(root, child, node) # Merge the nodes
isUnique = False # Could merge nodes
				if isUnique:				# No child was able to merge with a unique node, so it is a unique node
unique.append(child) # Could not merge nodes, node is unique
|
[
"def remove_duplicates(self):\n seen = set()\n self.nodes = [x for x in self.nodes if x not in seen and not seen.add(x)]",
"def merge_duplicates(self, tree: list):\n tree_set = []\n\n for node in tree:\n if node in tree_set:\n # node with the same key, merge nodes\n i = tree_set.index(node)\n tree_set[i].file += node.file\n tree_set[i].line += node.line\n continue\n tree_set.append(node)\n\n print('%smerged %i duplicate keys' % (Fore.GREEN, len(tree) - len(tree_set)))\n return tree_set",
"def backtracking(apta: Apta, unique = []):\n\t# print(unique)\n\tif(len(unique) == 0):\n\t\tunique.append(apta.root)\n\t\n\t# Check if finished\n\tif apta.complete() == True:\n\t\treturn\n\t# Check promising\n\tfor root in unique:\n\t\tif root.children:\n\t\t\tfor child in root.children:\n\t\t\t\tif child not in unique:\n\t\t\t\t\tfor node in unique:\n\t\t\t\t\t\tif match_labels(child, node):\n\t\t\t\t\t\t\ttemp_node = deepcopy(node)\n\t\t\t\t\t\t\tmerge_states(root, child, node)\n\t\t\t\t\t\t\tbacktracking(apta, list(unique))\n\t\t\t\t\t\t\tnode = temp_node\n\t\t\t\t\t\telse: unique.append(child)\n\treturn\n\t# if current_node.children:\n\t\t# for child in current_node.children:\n\t\t\t# for node in unique:\n\t\t\t\t# if match_labels(child, node):\n\t\t\t\t\t# apta.copy_tree()\n\t\t\t\t\t# merge_states(current_node, child, node)\n\t\t\t\t\t# backtracking(apta, apta.get_unique(unique))\n\t\t\t# unique.append(child)\n\t\t\t# backtracking(apta, child)",
"def __remove_duplicates_memory_heavy(self):\n m = {self.item: 1} \n current = self.next\n prev = self\n nodes_to_remove = []\n\n while current is not None:\n\n if current.item not in m:\n m[current.item] = 1\n else:\n nodes_to_remove.append((prev, current))\n\n prev = current \n current = current.next\n \n for i in range(len(nodes_to_remove) - 1):\n this = nodes_to_remove[i]\n nx = nodes_to_remove[i + 1] \n if this[1] == nx[0]:\n nodes_to_remove[i + 1] = (this[0], nx[1]) \n continue\n \n self.__remove(this) \n\n if len(nodes_to_remove) != 0:\n self.__remove(nodes_to_remove[-1])",
"def __remove_duplicates_fast_memory_heavy(self):\n for (item, start_node) in self.distinct_map.items():\n current = start_node.next_alike\n while current is not None:\n self.__remove_node(current) \n current = current.next_alike",
"def removeDuplicates(self, nodes):\n dict = {}\n [dict.setdefault(n.myLabel(),[]).append(n) for n in nodes]\n\n for label in dict.keys():\n list = dict[label]\n if len(list)>1:\n testKit = list[0]\n for kit in list:\n if not testKit.equivalent(kit):\n msg = 'Node with label %s has non-equivalent '% label\n msg += 'duplicates: removing all such nodes'\n logger.error(msg)\n [nodes.remove(n) for n in list]\n break",
"def _get_duplicates(self, root):\n pass",
"def removeDuplicates(vlist):\n ans = []\n for vtree in vlist :\n if not vtree in ans :\n ans.append(vtree)\n return ans",
"def isolate_graph(nodes):\n for n in nodes:\n n.children = list(nodes & set(n.children))\n n.parents = list(nodes & set(n.parents))",
"def get_all_unique_neighbours(self):\n unique_left = set(self.__left.get_neighbours())\n unique_right = set(self.__right.get_neighbours())\n\n all_neighbours = unique_left.union(unique_right)\n all_neighbours.add(self.__local_node)\n\n return all_neighbours",
"def adding_nodes(self):\n \n for node in self.vertex:\n i = 0\n if node not in self.queue:\n self.queue.append(node)\n\n for neigbor in self.neighbors:\n if node == neigbor[0]:\n if neigbor[-1] not in self.queue:\n self.queue.append(neigbor[-1])\n \n self.visited.append(self.queue.pop(i))\n\n return self.visited",
"def _resort_mapper( self ):\n for s in self._symbol_mapper[ \"node\" ]:\n for k, l in self._symbol_mapper[ \"node\" ][ s ].iteritems( ):\n self._symbol_mapper[ \"node\" ][ s ][ k ] = sorted( set( l ) )",
"def exc1_remove_dups(ll: LinkedList) -> LinkedList:\n lookup = set()\n if not ll.head or not ll.head.next:\n return ll\n node = ll.head.next\n previous = ll.head\n lookup.add(previous.data)\n while node:\n if node.data in lookup:\n previous.next = node.next\n else:\n lookup.add(node.data)\n previous = node\n node = node.next\n return ll",
"def dupes_in_list(self, l):\n seen = set()\n seen_twice = set()\n # Adds all elements it doesn't know yet to seen and\n # adds all others to seen_twice\n for x in l:\n if self.hashable(x):\n if x in seen:\n seen_twice.add(x)\n else:\n seen.add(x)\n return list(seen_twice)",
"def allvisited(nodes):\n for node in nodes:\n if not node.visited: \n return False\n return True",
"def removeDuplicate(d) :\n ans = []\n for prim in d :\n if not(prim in ans):\n ans = [prim] + ans\n return ans",
"def get_all_unique_neighbours(self):\n unique_neighbours = set()\n for ring in self.__rings:\n unique_neighbours |= ring.get_all_unique_neighbours()\n return unique_neighbours",
"def successors(self, node):\n # get mapping of {parent : child}\n children = self.children\n # if node doesn't exist return early\n if node not in children:\n return set([node])\n\n # children we need to collect\n queue = [node]\n # start collecting values with children of source\n collected = set(queue)\n\n # cap maximum iterations\n for _ in range(len(self.node_data) + 1):\n if len(queue) == 0:\n # no more nodes to visit so we're done\n return collected\n # add the children of this node to be processed\n childs = children.get(queue.pop())\n if childs is not None:\n queue.extend(childs)\n collected.update(childs)\n return collected",
"def get_connected_nodes(self, node):\n result = set()\n new_connected = {node}\n while new_connected:\n result.update(new_connected)\n next_new = set()\n for new_node in new_connected:\n # add new nodes to next new\n next_new.update(self._data[new_node] - result)\n new_connected = next_new\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Similar to greedy: attempt to match labels. If a match is found, copy the tree and merge the nodes. Continue until there are no non-unique nodes left in the tree. Then check whether the tree is complete, i.e. every node has a label; if not, assign labels at random. Test whether the tree is correct; if not, backtrack and pop the tree from the list.
|
def backtracking(apta: Apta, unique = []):
# print(unique)
if(len(unique) == 0):
unique.append(apta.root)
# Check if finished
if apta.complete() == True:
return
# Check promising
for root in unique:
if root.children:
for child in root.children:
if child not in unique:
for node in unique:
if match_labels(child, node):
temp_node = deepcopy(node)
merge_states(root, child, node)
backtracking(apta, list(unique))
node = temp_node
else: unique.append(child)
return
# if current_node.children:
# for child in current_node.children:
# for node in unique:
# if match_labels(child, node):
# apta.copy_tree()
# merge_states(current_node, child, node)
# backtracking(apta, apta.get_unique(unique))
# unique.append(child)
# backtracking(apta, child)
|
[
"def select_case_2(data,labels,T,budget,batch_size):\n\n n_nodes = len(T[1]) #total nodes in T\n n_samples = len(data) #total samples in data\n L = np.zeros(n_nodes) #majority label\n p1 = np.zeros(n_nodes) #empirical label frequency\n n = np.zeros(n_nodes) #number of points sampled from each node\n error = []#np.zeros(n_samples) #error at each round\n root = n_nodes-1 #corresponds to index of root\n P = np.array([root])\n L[root] = 1\n\n for i in range(budget):\n v_selected = np.array([])\n\n for b in range(batch_size):\n #TODO: select a node from P biasing towards choosing nodes in areas where the observed labels are less pure\n\n w = np.array([])\n za = 0.95\n\n for j in range(len(P)):\n leaves = get_leaves([], P[j], T, n_samples)\n num_leaves = len(leaves)\n p_v = max(p1[P[j]], 1-p1[P[j]]) # majority label frequency\n p_up = p_v + za * np.sqrt(p_v * (1-p_v)/num_leaves)\n wv = num_leaves/n_samples\n\n w = np.append(w, wv * (1.0 - p_up))\n\n if (np.sum(w) == 0):\n w = w + 1.0/len(w)\n else:\n w = w / np.sum(w)\n #print(\"weights:\", w)\n\n v = random.choices(population = range(len(P)), weights = w, k=1)\n v = P[v[0]]\n #print(\"Selected internal node:\", v)\n\n #TODO: pick a random leaf node from subtree Tv and query its label\n z = random.choice(get_leaves([], v, T, n_samples))\n #print(\"Selected to query:\", z)\n l = labels[z]\n\n #TODO: update empirical counts and probabilities for all nodes u on path from z to v\n z = np.array([z])\n n, p1 = update_empirical(n,p1,v,z,l,T)\n\n v_selected = np.append(v_selected, v)\n v_selected = v_selected.astype(int)\n\n #TODO: update admissible A and compute scores; find best pruning and labeling\n P_best, L[v] = best_pruning_and_labeling(n,p1,v_selected,T,n_samples)\n #print(\"best Pruning:\", P_best)\n #TODO: update pruning P and labeling L\n P = np.delete(P, np.argwhere(P==v))\n P = np.union1d(P, P_best)\n #print(\"Updated Pruning:\", P)\n\n #TODO: temporarily assign labels to every leaf and compute error\n L = assign_labels(L,v_selected,v_selected,T,n_samples)\n e = compute_error(L,labels)\n error.append(e)\n\n if (i % 100 == 0):\n print(e)\n\n for v in P:\n #TODO: assign labels to all nodes under the current pruning\n L = assign_labels(L,v,v,T,n_samples)\n\n return L, np.array(error)",
"def greedy(startNode: Node, unique = []):\n\n\tif (len(unique) == 0):\n\t\tunique.append(startNode)\n\tfor root in unique:\t\n\t\tif root.children:\t# Check if given root has children\n\t\t\tfor child in root.children:\t\t# Check if any children can merge with the current uniques\n\t\t\t\tisUnique = True\t\t\t\t# Becomes false if a node is able to merge\n\t\t\t\tfor node in unique:\t\t\t# Test if any children can merge with any of the unique nodes\n\t\t\t\t\tif match_labels(child, node):\t# Match labels between two nodes\n\t\t\t\t\t\tmerge_states(root, child, node)\t# Merge the nodes\n\t\t\t\t\t\tisUnique = False\t# Could merge nodes\n\t\t\t\tif isUnique:\t\t\t\t# No child where able to merge with a unique node, it is therefore a unique node\n\t\t\t\t\tunique.append(child) # Could not merge nodes, node is unique",
"def get_random_tagged_tree(number_leafnodes, percentage_parasites, percentage_unknown, p_multifurcation, beta_distribution_parameters):\n # Arguments:\n # number_leafnodes - needed for randomized function\n # percentage_parasites\n # percentage_unknown - proportion of unknown leafnodes\n # percentage_multifurcation\n # beta_distribution_parameters - [A_FL, B_FL, A_P, B_P]\n\n global percentage_multifurcation\n percentage_multifurcation = p_multifurcation\n\n START_TIME = datetime.datetime.now().replace(microsecond=0)\n CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)\n print(\"---- randomized tree ----\")\n current_percentage_parasites = 0\n # randomized(cls, taxa, branch_length=1.0, branch_stdev=None) \n # Create a randomized bifurcating tree given a list of taxa.\n # https://github.com/biopython/biopython/blob/master/Bio/Phylo/BaseTree.py\n randomized_tree = Phylo.BaseTree.Tree.randomized(number_leafnodes)\n randomized_tree.clade.name = 'root'\n boolean = True\n CURRENT_TIME = print_time(START_TIME)\n print(\"---- tag tree ----\")\n while boolean:\n current_tree = deepcopy(randomized_tree)\n result = tag_tree(current_tree.clade, [], 0, [0, 0], percentage_parasites, percentage_unknown, beta_distribution_parameters) # father_tag = 0 -> free living\n nodelist = result[1]\n leaf_distr = result[2]\n # child_depth = child_depth + result[3]\n # %P = #FL / (#P + #FL) * 100\n current_percentage_parasites = leaf_distr[1] / (leaf_distr[0] + leaf_distr[1]) \n print(\"tried\", current_percentage_parasites*100, \"% of parasites\") # 40% parasites?\n if (percentage_parasites - permitted_deviation) < current_percentage_parasites < (percentage_parasites + permitted_deviation):\n boolean = False\n print(\"----\")\n CURRENT_TIME = print_time(CURRENT_TIME)\n print(\"----\")\n # print(current_percentage_parasites, '% parasites,', 100 - current_percentage_parasites, '% free-living')\n return [current_tree, nodelist]",
"def prune(t, k):\n\tif k == 0:\n\t\treturn tree(label(t))\n\telse:\n\t\treturn tree(label(t), [prune(b, k-1) for b in branches(t)])",
"def removeDuplicates(self, nodes):\n dict = {}\n [dict.setdefault(n.myLabel(),[]).append(n) for n in nodes]\n\n for label in dict.keys():\n list = dict[label]\n if len(list)>1:\n testKit = list[0]\n for kit in list:\n if not testKit.equivalent(kit):\n msg = 'Node with label %s has non-equivalent '% label\n msg += 'duplicates: removing all such nodes'\n logger.error(msg)\n [nodes.remove(n) for n in list]\n break",
"def build_suffix_tree(text):\r\n result = []\r\n # Implement this function yourself\r\n #text = text[:-1]\r\n #print(\"text is\", text)\r\n tree = []\r\n tree.append([])\r\n counter = 0\r\n for i in range(len(text)):\r\n pat = text[i:]\r\n #print()\r\n #print(i, \"pat is\", pat)\r\n cn, flag = 0, 0\r\n head = 0\r\n while not flag:\r\n cnprev = cn\r\n #print(\"cn is now\", cn)\r\n for j in range(len(tree[cn])):\r\n (pos, l, d) = tree[cn][j]\r\n if text[pos] == pat[0 + head]:\r\n #print(\"Match!\")\r\n lab = text[pos:pos + l]\r\n n = strcmp(pat[head:], lab)\r\n #print(\"n is\", n)\r\n if n == len(pat) == len(lab):\r\n flag = 1\r\n elif n == len(lab) and n != len(pat):\r\n if d > 0:\r\n cn = d\r\n head += len(lab)\r\n #print(\"Moved over\", lab)\r\n else:\r\n #print(\"ACHTUNG!\")\r\n pass # Undefined behavior\r\n else:\r\n #print(\"Common part is\", pat[head:head + n])\r\n counter += 1\r\n tree[cn][j] = (pos, n, counter)\r\n cn = counter\r\n tree.append([])\r\n tree[cn].append((head + i + n, len(pat[head + n:]), 0))\r\n tree[cn].append((pos + n, len(lab[n:]), d))\r\n #print(\"First part is\", pat[head + n:])\r\n #print(\"Second part is\", lab[n:])\r\n flag = 1\r\n break\r\n if cn == cnprev and not flag:\r\n tree[cn].append((i + head, len(pat[head:]), 0))\r\n flag = 1\r\n #print(i, \"tree is\", tree)\r\n #tree[0].append((len(text), 0, 0))\r\n for i in range(len(tree)):\r\n for j in range(len(tree[i])):\r\n (pos, l, d) = tree[i][j]\r\n result.append(text[pos:pos + l])\r\n return result",
"def test_case_2_2(self):\n \n g = Red_Black_Tree()\n \n n2 = Red_Black_Node(2)\n n1 = Red_Black_Node(1)\n n4 = Red_Black_Node(4)\n n3 = Red_Black_Node(3)\n \n n2.color = BLACK\n n1.color = BLACK\n n4.color = RED\n n3.color = BLACK\n \n n2.p = g.nil\n n2.left = n1\n n1.p = n2\n n2.right = n4\n n4.p = n2\n \n n1.left = g.nil\n n1.right = g.nil\n \n n4.left = n3\n n3.p = n4\n n4.right = g.nil\n g.nil.p = n4\n \n n3.left = g.nil\n n3.right = g.nil\n \n g.root = n2\n g.Delete_Fixup(g.nil)\n \n self.test_bst(g)\n self.test_properties(g)",
"def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root",
"def setUp(self):\n self.Empty = TreeNode()\n self.Single = TreeNode(Name='a')\n self.Child = TreeNode(Name='b')\n self.OneChild = TreeNode(Name='a', Children=[self.Child])\n self.Multi = TreeNode(Name = 'a', Children='bcd')\n self.Repeated = TreeNode(Name='x', Children='aaa')\n self.BigName = map(TreeNode, '0123456789')\n self.BigParent = TreeNode(Name = 'x', Children = self.BigName)\n self.Comparisons = map(TreeNode, 'aab')\n \n nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])\n nodes['a'].append(nodes['b'])\n nodes['b'].append(nodes['c'])\n nodes['c'].append(nodes['d'])\n nodes['c'].append(nodes['e'])\n nodes['c'].append(nodes['f'])\n nodes['f'].append(nodes['g'])\n nodes['a'].append(nodes['h'])\n self.TreeNode = nodes\n self.TreeRoot = nodes['a']\n\n self.s = '((H,G),(R,M));'\n self.t = DndParser(self.s, TreeNode)\n self.s2 = '(((H,G),R),M);'\n self.t2 = DndParser(self.s2, TreeNode)\n self.s4 = '(((H,G),(O,R)),X);'\n self.t4 = DndParser(self.s4, TreeNode)",
"def __init__(self, list_of_sequences=[], label=[]):\n self.nodes = set([])\n self.nodes_label = {}\n self.node_children_dict = {}\n self.node_parent_dict = {}\n self.node_sequence_dict = {}\n self.node_visits_dict = {}\n self.node_full_sequence_dict = {}\n self.list_of_sequences = list_of_sequences\n self.number_of_objects = len(list_of_sequences)\n self.number_of_classes = len(set(label))\n self.label = label\n\n nodes = set([])\n structure = {}\n dict_seq = {}\n dict_num = {}\n dict_prev = {}\n dict_all_seq = {}\n\n num_of_sequence = 0\n # len_of_seq = len(self.list_of_sequences)\n free_node = 1\n seq1 = self.list_of_sequences[0]\n nodes.add(0)\n current_node = 0\n dict_all_seq[0] = []\n\n for i in seq1:\n structure[current_node] = [free_node]\n structure[free_node] = []\n dict_prev[free_node] = current_node\n\n str_seq = [elem for elem in i]\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n\n if type(str_seq) == list:\n dict_seq[free_node] = str_seq\n else:\n dict_seq[free_node] = [str_seq]\n # dict_num[free_node] = 1\n dict_num[free_node] = [0 for i in range(self.number_of_classes)]\n dict_num[free_node][self.label[num_of_sequence]] = 1\n current_node = free_node\n free_node += 1\n\n for seq in self.list_of_sequences[1:]:\n num_of_sequence += 1\n current_node = 0\n for elem in seq:\n str_seq = [i for i in elem]\n if len(structure[current_node]) > 0:\n temp_seq = [dict_seq[son] for son in structure[current_node]]\n flag = str_seq in temp_seq\n if flag:\n number = 0\n while temp_seq[number] != str_seq:\n number += 1\n current_node = structure[current_node][number]\n # dict_num[current_node] += 1\n dict_num[current_node][self.label[num_of_sequence]] += 1\n else:\n structure[current_node].append(free_node)\n dict_prev[free_node] = current_node\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n current_node = free_node\n structure[current_node] = []\n # dict_num[current_node] = 1\n dict_num[current_node] = [0 for i in range(self.number_of_classes)]\n dict_num[current_node][self.label[num_of_sequence]] = 1\n dict_seq[current_node] = str_seq\n free_node += 1\n else:\n structure[current_node].append(free_node)\n dict_prev[free_node] = current_node\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n current_node = free_node\n structure[current_node] = []\n # dict_num[current_node] = 1\n dict_num[current_node] = [0 for i in range(self.number_of_classes)]\n dict_num[current_node][self.label[num_of_sequence]] = 1\n dict_seq[current_node] = str_seq\n free_node += 1\n\n self.nodes = nodes\n self.node_children_dict = structure\n self.node_sequence_dict = dict_seq\n self.node_visits_dict = dict_num\n self.node_parent_dict = dict_prev\n self.node_full_sequence_dict = dict_all_seq\n\n dic_all_seq_rev = {str(v): k for k, v in self.node_full_sequence_dict.items()}\n self.node_full_sequence_dict_reversed = dic_all_seq_rev",
"def remove_random(n, T, G):\n nodes = T.nodes()\n #print(nodes)\n to_remove = np.random.choice(nodes, n)\n # print(to_remove)\n T_copy = T.copy()\n for x in to_remove:\n if x in T_copy:\n T_copy.remove_node(x)\n #count = 0 \n \n def find_tree(n, T_copy, G):\n\n if n == 0:\n return T.copy()\n\n iters = 0\n\n # print(n, len(T_copy.nodes()))\n\n while not (is_valid_network(G, T_copy)):\n if iters >= 20:\n T_copy = T.copy()\n break\n T_copy = T.copy()\n # print(len(T_copy.nodes()), len(T.nodes()))\n to_remove = np.random.choice(T_copy.nodes(), n, replace=False)\n for x in to_remove:\n T_copy.remove_node(x)\n #count += 1\n iters += 1\n \n if not is_valid_network(G, T_copy):\n \n return find_tree(n - 1, T_copy, G)\n \n return T_copy\n \n return find_tree(n, T_copy, G)",
"def label_reference_subtree(ll, new_sample_string):\n\n for node in ll.Objects:\n node.traits['ref'] = False\n\n ref_nodes = ll.getExternal(lambda k: new_sample_string != k.traits['node_attrs']['submitting_lab']['value'])\n while len(ref_nodes) > 0:\n node = ref_nodes.pop()\n node.traits['ref'] = True\n if node.parent and not node.parent.traits.get('ref'):\n ref_nodes.append(node.parent)",
"def predict(tree, x, y = []):\n\n\t#conditions of continuous and discrete features\n\tnode_id = 1 #initialize node identifier as first node under the root\n\twhile 1:\n\t\tnodes = tree[node_id]\n\n\t\tif nodes[0][5] == \"c\":\n\t\t\tif x[nodes[0][1]] <= nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\t\t\telse:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\t\telse:\n\t\t\tif x[nodes[0][1]] in nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\n\t\t\telif x[nodes[1][1]] in nodes[1][2]:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\n\t\t\telse:\n\t\t\t\t#value is not in left or right branch. Get label distributions of left and right child\n\t\t\t\t#sum labels distribution to get parent label distribution\n\t\t\t\tnode_id = str(nodes[0][0]) + \",\" + str(nodes[1][0])\n\t\t\t\tindex, nodes = 0, [[0,0,0,{ k: nodes[0][3].get(k, 0) + nodes[1][3] .get(k, 0) for k in set(nodes[0][3]) | set(nodes[1][3] )}]]\n\t\t\t\t#print node_id, nodes[0][3], y\n\n\t\tif node_id in tree.keys(): #check if tree can be traversed further\n\t\t\tcontinue\n\t\t\n\t\tprediction = max(nodes[index][3], key = nodes[index][3].get)\n\t\tif y == []:\n\t\t\treturn prediction\n\t\t\n\t\tprobs = sorted(zip(nodes[index][3].keys(), np.true_divide(nodes[index][3].values(), np.sum(nodes[index][3].values()))), key = itemgetter(1), reverse = True)\n\t\tif prediction == y:\n\t\t\tmargin = probs[0][1] - probs[1][1] if len(probs) > 1 else 1\n\t\telse:\n\t\t\tmargin = dict(probs).get(y, 0) - probs[0][1]\n\t\treturn node_id, margin",
"def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? Check this",
"def sub_tree_gen(T, k, i, rand, version=SHETVersion.Index):\n tree_i = [rand.next_element(T, 0)[0]]\n\n # the Ti tree contains this node\n tree_i[0].cliqueList.append(i)\n\n if k <= 1:\n return tree_i\n\n k_i = rand.next_random(1, 2 * k - 1)\n s_y = 0\n for _ in range(1, k_i):\n # after sy we have nodes with neighbors outside\n y, yi = rand.next_element(tree_i, s_y)\n # after y.s in y.Ax there is a neighbor of y outside\n z, zi = y.Ax[y.s], y.s # rand.next_element(y.Ax, y.s)\n\n # add z to Ti\n tree_i.append(z)\n z.cliqueList.append(i) # add to the z node of T the {i} number of Ti\n\n # fix y.Ax\n if zi != y.s:\n y.Ax[zi], y.Ax[y.s] = y.Ax[y.s], y.Ax[zi]\n if version != SHETVersion.Index:\n y.Dx[z] = y.s\n y.Dx[y.Ax[zi]] = zi\n y.s += 1\n\n # now fix z\n if z.Ax[z.s] != y:\n if version == SHETVersion.Index:\n yzi = z.Ax.index(y)\n z.Ax[yzi], z.Ax[z.s] = z.Ax[z.s], z.Ax[yzi]\n else:\n yzi = z.Dx[y]\n z.Ax[yzi], z.Ax[z.s] = z.Ax[z.s], z.Ax[yzi]\n z.Dx[y] = z.s\n z.Dx[z.Ax[yzi]] = yzi\n z.s += 1\n\n # if degree of y equals the seperation index on adjacency list, y\n # cannot be selected any more\n if y.s > len(y.Ax) - 1:\n tree_i[s_y], tree_i[yi] = tree_i[yi], tree_i[s_y]\n s_y += 1\n\n if len(z.Ax) == 1:\n tree_i[s_y], tree_i[-1] = tree_i[-1], tree_i[s_y]\n s_y += 1\n\n for node in tree_i:\n node.s = 0\n\n return tree_i",
"def subsample_pathologies(labels_path, per_class = 20000, val_split = 0.1, no_finding_lim=7810, patho = ['Atelectasis', 'Lung Opacity', 'Pleural Effusion', 'Support Devices', ]):\n\n if no_finding_lim > per_class:\n no_finding_lim = per_class\n\n labels = pd.read_csv(labels_path)\n\n patho_labels = labels[patho].values\n\n uncertain = (patho_labels == 1).any(axis=1)\n present = (patho_labels == 2).any(axis=1)\n frontal = labels[\"Frontal/Lateral\"] == \"Frontal\"\n no_finding = labels[\"No Finding\"] == 1\n\n # image shapes\n wide_angel = (labels[\"height\"] / labels[\"width\"]) <= 1\n not_too_wide = (labels[\"height\"] / labels[\"width\"]) >= 3 / 4\n\n fine_sizes = np.logical_and(wide_angel, not_too_wide)\n\n # print(no_finding.sum(), np.logical_and(fine_sizes, no_finding).sum())\n\n bool_indices = np.logical_or(present, no_finding)\n bool_indices = np.logical_and(bool_indices, np.logical_not(uncertain))\n bool_indices = np.logical_and(bool_indices, frontal)\n bool_indices = np.logical_and(bool_indices, fine_sizes)\n\n indices_list = []\n\n columns = [\"No Finding\"] + patho\n\n rng = np.random.default_rng()\n for col in columns:\n if col != \"No Finding\":\n p_indices = np.where(np.logical_and((labels[col].values == 2), bool_indices))\n print(col, p_indices[0].shape[0])\n\n sample_ids = rng.choice(p_indices[0].shape[0], per_class, replace=False)\n\n else:\n p_indices = np.where(np.logical_and((labels[col].values == 1), bool_indices))\n print(col, p_indices[0].shape[0])\n\n sample_ids = rng.choice(p_indices[0].shape[0], no_finding_lim, replace=False)\n\n p_chosen = p_indices[0][sample_ids]\n\n\n p_chosen_bool = np.zeros((labels.shape[0],))\n p_chosen_bool[p_indices[0][sample_ids]] = 1\n\n bool_indices = np.logical_and(bool_indices, np.logical_not(p_chosen_bool))\n\n indices_list.append(p_chosen)\n\n indices = np.concatenate(indices_list)\n\n exclude = [p for p in ds.PATHOLOGIES if p not in patho]\n\n chosen = labels.drop(exclude, axis=1).iloc[indices]\n\n for p in patho:\n\n chosen.loc[chosen[p] == 2, p] = 1\n\n \"\"\"\n y, x = np.where(chosen_train[patho] == 2)\n patho_labels = chosen_train[patho].copy()\n patho_labels.iloc[y,x] = 1\n chosen_train[patho] = patho_labels\n\n chosen_val = labels.drop(exclude, axis=1).iloc[indices_val]\n #y, x = np.where(chosen_val[patho] == 2)\n #patho_labels = chosen_val[patho].copy()\n #patho_labels.iloc[y,x] = 1\n #chosen_val[patho] = patho_labels\n \"\"\"\n\n #sample_idx = rng.choice(size, , replace=False)\n\n #patho_adjust = [''.join(p.split(' ')) for p in patho]\n\n rng = np.random.default_rng()\n split = rng.choice(chosen.shape[0], int(chosen.shape[0]*(1-val_split)), replace=False)\n\n chosen_bool = np.zeros((chosen.shape[0],), dtype=bool)\n chosen_bool[split] = True\n chosen_train = chosen.loc[chosen_bool,:]\n chosen_val = chosen.loc[np.logical_not(chosen_bool),:]\n\n\n save_path_train = labels_path.split('train.csv')[0] + f'train-{chosen_train.shape[0]}-' + 'reduced' + '.csv'\n save_path_val = labels_path.split('train.csv')[0] + f'valid-{chosen_val.shape[0]}-' + 'reduced' + '.csv'\n\n\n # patho_adjust = [''.join(p.split(' ')) for p in patho]\n\n #save_path_train = (\n # labels_path.split(\"train.csv\")[0]\n # + f\"train-{(len(patho)+1)*train_per_class}-\"\n # + \"reduced\"\n # + \".csv\"\n #)\n #save_path_val = (\n # labels_path.split(\"train.csv\")[0]\n # + f\"valid-{(len(patho)+1)*val_per_class}-\"\n # + \"reduced\"\n # + \".csv\"\n #)\n\n print(save_path_train)\n print(save_path_val)\n chosen_train.to_csv(save_path_train, index=False)\n 
chosen_val.to_csv(save_path_val, index=False)",
"def generate_random_tree_node(self, current_depth, nominal_att_candidates, min_numeric_vals, max_numeric_vals, rand):\n if ((current_depth >= self.max_tree_depth) | ((current_depth >= self.min_leaf_depth) & (self.fraction_of_leaves_per_level >= (1.0 - rand.rand())))):\n leaf = Node()\n leaf.class_label = rand.randint(0, self.num_classes)\n return leaf\n\n node = Node()\n chosen_att = rand.randint(0, len(nominal_att_candidates))\n if (chosen_att < self.num_numerical_attributes):\n numeric_index = chosen_att\n node.split_att_index = numeric_index\n min_val = min_numeric_vals[numeric_index]\n max_val = max_numeric_vals[numeric_index]\n node.split_att_value = ((max_val - min_val) * rand.rand() + min_val)\n node.children = []\n\n new_max_vals = max_numeric_vals[:]\n new_max_vals[numeric_index] = node.split_att_value\n node.children.append(self.generate_random_tree_node(current_depth + 1, nominal_att_candidates, min_numeric_vals, new_max_vals, rand))\n\n new_min_vals = min_numeric_vals[:]\n new_min_vals[numeric_index] = node.split_att_value\n node.children.append(self.generate_random_tree_node(current_depth + 1, nominal_att_candidates, new_min_vals, max_numeric_vals, rand))\n else:\n node.split_att_index = nominal_att_candidates[chosen_att]\n new_nominal_candidates = array('d', nominal_att_candidates)\n new_nominal_candidates.remove(node.split_att_index)\n\n for i in range(self.num_values_per_nominal_att):\n node.children.append(self.generate_random_tree_node(current_depth + 1, new_nominal_candidates, min_numeric_vals, max_numeric_vals, rand))\n\n return node",
"def prune_min(t):\n while len(t.branches) > 1:\n largest = max(t.branches,key = lambda x: x.label)\n t.branches.remove(largest)\n for b in t.branches:\n prune_min(b)",
"def do_test_insert_4_leafs(self, hashtype):\n check_hashtype(hashtype)\n name = self.rng.next_file_name(8)\n tree = NLHTree(name, hashtype)\n leaf_names = set()\n a_leaf = self.make_leaf(leaf_names, hashtype)\n b_leaf = self.make_leaf(leaf_names, hashtype)\n c_leaf = self.make_leaf(leaf_names, hashtype)\n d_leaf = self.make_leaf(leaf_names, hashtype)\n self.assertEqual(len(tree.nodes), 0)\n tree.insert(a_leaf)\n self.assertEqual(len(tree.nodes), 1)\n tree.insert(b_leaf)\n self.assertEqual(len(tree.nodes), 2)\n tree.insert(c_leaf)\n self.assertEqual(len(tree.nodes), 3)\n tree.insert(d_leaf)\n self.assertEqual(len(tree.nodes), 4)\n # we expect the nodes to be sorted\n for ndx in range(3):\n self.assertTrue(tree.nodes[ndx].name < tree.nodes[ndx + 1].name)\n\n matches = tree.list('*')\n for ndx, qqq in enumerate(tree.nodes):\n self.assertEqual(matches[ndx], ' ' + qqq.name)\n\n self.assertEqual(tree, tree)\n tree2 = tree.clone()\n self.assertEqual(tree2, tree)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Menu for importing data. TODO ....
|
def menuentriesimport(self):
    menutrig = True
    while menutrig:
        choose = raw_input("Choose your Task:\n Read Database: 1 \n Read vCard: 2 \n back: b \n ::>")
        if choose == "1":
            pathandfile = raw_input("Enter Path and Filename:")
            base = importDataobj.loadDatabase(pathandfile)
            i = 0
            for baseline in base:
                print(baseline)
                baseobj.storeDataToBase(baseline)
                print(i)
                i += 1
            menutrig = False
        elif choose == "2":
            print("TODO")
            menutrig = False
        elif choose.lower() == "b":
            # fix: call .lower() -- the original compared the bound method itself to "b", which is always False
            menutrig = False
    menuobj.menuentries()
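The method above leans on three module-level collaborators that are not shown (importDataobj, baseobj, menuobj). Purely as a hedged sketch, with every class and method name here a hypothetical stand-in inferred from the calls above rather than the project's real objects, they might look like:

# Hypothetical collaborators, sketched only so the menu above can run in isolation.
class _ImportData(object):
    def loadDatabase(self, pathandfile):
        # assumption: returns an iterable of rows read from the given file
        with open(pathandfile) as handle:
            return [line.rstrip("\n") for line in handle]

class _Base(object):
    def __init__(self):
        self.rows = []
    def storeDataToBase(self, baseline):
        # assumption: persists one row; here it is only collected in memory
        self.rows.append(baseline)

class _Menu(object):
    def menuentries(self):
        # assumption: returns to the main menu; a no-op in this sketch
        pass

importDataobj, baseobj, menuobj = _ImportData(), _Base(), _Menu()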
|
[
"def cmd_import(self):\n self.save()\n path = tkinter_filedialog.askopenfilename(\n initialdir=self.prefs[\"save_directory\"],\n filetypes=[(\"aeneas output ZIP file\", \".zip\"), (\"SMIL file\", \".smil\")],\n parent=self,\n title=\"Select aeneas output (SMIL or ZIP of SMILs)\"\n )\n if (path is not None) and (len(path) > 0) and (os.path.isfile(path)):\n if path.endswith(\".zip\"):\n self.import_zip_file(path)\n elif path.endswith(\".smil\"):\n self.import_smil_file(path)\n self.quit()",
"def importData(self):\n\n self.importDialog = importSpread.importDialog()\n self.importDialog.signals.returnDatasetSignal.connect(self.addImportToDatasets)\n\n return",
"def onActionImportFromJSONTriggered(self):\n fileName = self.openFileDialog(\"Import From JSON\", fileType=\"JSON\", fileExtension=\"json\")\n if fileName:\n serializer = StudentJSONSerializer()\n students = serializer.importFromJSON(fileName)\n # print(students)\n self.generateWindowWithTableWidget(students, \"Import From JSON\")\n else:\n QMessageBox.critical(self, \"<<Error>>\", \"No fileName was given.\")",
"def onActionImportFromXMLTriggered(self):\n fileName = self.openFileDialog(\"Import From XML\", fileType=\"XML\", fileExtension=\"xml\")\n if fileName:\n serializer = StudentXMLSerializer()\n students = serializer.importFromXML(fileName)\n # print(students)\n self.generateWindowWithTableWidget(students, \"Import From XML\")\n else:\n QMessageBox.critical(self, \"<<Error>>\", \"No fileName was given.\")",
"def post_import(self):",
"def import_csv(self):\r\n path = askopenfilename(title=\"Philip Deck - Open\", filetypes=[('CSV files', '*.csv')])\r\n # #ADD ERROR CHECKING\r\n if path is not None and path != \"\":\r\n self.app.set_file(path)\r\n self.app.load_data_from_file()\r\n self.populate_listbox(self.app.data)\r\n self.set_infobox_msg(\"Imported \" + str(self.listbox.size()) + \" rows from \" + path)",
"def admin_import(self, request, queryset):\n pass",
"def db_imports():\n import_energy_data()",
"def handle(self, *args, **options):\n import_alumni(options[\"csv_path\"])",
"def on_file_selected(self,widget,data=None):\n\n self.box.list_store.clear()\n _file = widget.get_filename()\n if _file == '':\n return\n importer = JSON_Importer()\n self.imported_data = importer.from_JSON(_file)\n if self.imported_data is not None:\n self.box.import_button.set_sensitive(True)\n for channel in self.imported_data:\n channel_data = collections.OrderedDict(\n sorted(self.imported_data[channel].items()))\n self.box.list_store.append(channel_data.values())",
"def menu_file_import(self):\n\n self.tmp_fw_list = []\n\n Gtk_Main.Gtk_Main().statusbar.change_message(\"Import ...\")\n\n filenames = self.open_filechooser(\"Import firewall configuration\", multiple_select=True)\n\n if filenames:\n Gtk_Main.Gtk_Main().lateral_pane.help_message.change_message(Gtk_Message.ON_IMPORT_CONF_FILE)\n\n while filenames:\n self.file_popup_menu(filenames.pop(0))\n self.next_file = False\n # freeze execution and wait the parser to finish\n while not self.next_file:\n while gtk.events_pending():\n gtk.main_iteration_do(False)\n time.sleep(0.1)\n\n # Clean all the fw content\n to_delete_lists = []\n for fw in self.tmp_fw_list:\n to_delete = {}\n for idx1, acl1 in enumerate(fw.acl):\n if len(acl1.rules):\n if idx1 in to_delete:\n break\n for idx2, acl2 in enumerate(fw.acl):\n if idx1 == idx2:\n continue\n if idx2 in to_delete:\n continue\n if len(acl2.rules):\n check = 0\n if len(acl1.rules) == len(acl2.rules):\n for idx3, rule in enumerate(acl1.rules):\n if acl1.rules[idx3].identifier == acl2.rules[idx3].identifier:\n check += 1\n if check == len(acl1.rules):\n to_delete[idx1] = \"\"\n else:\n continue\n to_delete_lists.append(to_delete)\n for idx, to_delete in enumerate(to_delete_lists):\n self.tmp_fw_list[idx].acl = [i for j, i in enumerate(self.tmp_fw_list[idx].acl) if j not in to_delete]\n\n Gtk_Main.Gtk_Main().lateral_pane.help_message.change_message(Gtk_Message.TOPOLOGY_MESSAGE)\n Gtk_Main.Gtk_Main().statusbar.change_message(\"Construct ROBDD ...\")\n Gtk_Main.Gtk_Main().update_interface()\n # Add check for reduce rule number\n for fw in self.tmp_fw_list:\n t0 = time.time()\n fw.build_bdd()\n message = \"ROBDD build bdd in %.3f seconds\" % (time.time() - t0)\n if len(self.tmp_fw_list) - self.tmp_fw_list.index(fw) - 1 > 0:\n message += \", %d remaining ...\" % (len(self.tmp_fw_list) - self.tmp_fw_list.index(fw) - 1)\n Gtk_Main.Gtk_Main().change_statusbar(message)\n Gtk_Main.Gtk_Main().update_interface()\n\n Gtk_Main.Gtk_Main().statusbar.change_message(\"Ready\")",
"def tab_import(self):\n global selected_only\n\n currentTab = self.tabs_widget.currentIndex()\n\n if currentTab == 0:\n LOG.info(\"Importing skin cluster data...\")\n current_file_path = self.FilePathLine.text()\n full_file_path = resolve_file_path(current_file_path)\n skinCluster_io.importSkinWeight(full_file_path, [], selected=selected_only)\n\n elif currentTab == 1:\n LOG.info(\"Importing blendShape data...\")\n current_file_path = self.FilePathLine.text()\n full_file_path = resolve_file_path(current_file_path)\n blendShape_io.import_blendshape(full_file_path)\n\n elif currentTab == 2:\n LOG.info(\"Importing control curve data...\")\n current_file_path = self.FilePathLine.text()\n full_file_path = resolve_file_path(current_file_path)\n curve_io.import_control_curves(full_file_path)\n\n elif currentTab == 3:\n LOG.info(\"Importing attribute data...\")\n current_file_path = self.FilePathLine.text()\n full_file_path = resolve_file_path(current_file_path)\n attribute_io.import_attributes(full_file_path)\n\n elif currentTab == 4:\n LOG.info(\"Importing Set Driven Keyframe data...\")\n current_file_path = self.FilePathLine.text()\n full_file_path = resolve_file_path(current_file_path)\n\n elif currentTab == 5:\n LOG.info(\"Importing SHAPES blendshape data...\")\n #current_file_path = self.FilePathLine.text()\n #full_file_path = resolve_file_path(current_file_path)\n mel.eval(\"SHAPES;\")\n mel.eval('shapesUtil_importFromCustomPath \"D:/dev/reef/SourceArt/Characters/RainbowParrot/Rig/work/RainbowParrot/data/SHAPES/setup\";')",
"def _import(self,widget,data=None):\n\n all_category = Channel_Store.ALL\n if self.imported_data is not None:\n for channel in self.imported_data:\n imported_channel = Channel(self.imported_data[channel])\n all_category.add(channel,imported_channel)\n (channel_name,image_url,desc) = (channel\n ,imported_channel.get_thumbnail_url()\n ,imported_channel.get_description())\n scaled_image = graphics_util.get_scaled_image(image_url,180)\n self.app_window.icon_list_store.append([scaled_image,channel_name,desc])\n self.box.import_box.destroy()",
"def buttonImportLeague_clicked(self):\n #print(\"Import Team\")\n dialog = QFileDialog(self)\n dialog.setAcceptMode(QFileDialog.AcceptOpen)\n dialog.setNameFilters([\"All files (*.*)\", \"CSV (*.csv)\"])\n dialog.selectNameFilter(\"CSV (*.csv)\")\n if dialog.exec_() == QtWidgets.QDialog.Accepted:\n filepath = dialog.selectedFiles()[0]\n self._db.instance().import_league(self.league.name, filepath) # league name comes from previous screen\n self.league = self._db.instance().leagues[len(self._db.instance().leagues)-1] # most recent added league\n self.update_ui()\n #print(\"Successful Import\")\n else:\n self.warn(\"File Import Cancelled\", \"Unable to import the specified file.\")",
"def browse_csv(inst):\n try:\n from tkinter import filedialog as fd\n except ImportError as err:\n msgbox.showerror(\"Error\",f\"Error loading module : {err}\")\n else:\n inst.temp_path=\"\"\n inst.filepath=fd.askopenfilename(title=\"Select .csv file\",initialdir=inst.fDir,filetypes=[(\"CSV files\",\".csv\")])\n global copy_path\n copy_path=inst.filepath\n if inst.filepath:\n inst.temp_path=copy.deepcopy(inst.filepath)\n inst.entry2.configure(state=\"active\")\n inst.entry2.delete(0,tk.END)\n inst.entry2.insert(0,inst.temp_path)\n inst.entry2.configure(state=\"readonly\")\n inst.entry3.configure(state=\"active\")\n inst.entry3.delete(0,tk.END)\n inst.excel_sheet_name.set(None)\n inst.entry3_sheet_name.delete(0,tk.END)\n inst.entry4_username.delete(0,tk.END)\n inst.entry4_password.delete(0,tk.END)\n inst.entry4_database.delete(0,tk.END)\n inst.entry4_table.delete(0,tk.END)\n else:\n inst.entry2.configure(state=\"active\")\n inst.entry2.delete(0,tk.END)\n inst.entry2.insert(0,inst.temp_path)\n inst.entry2.configure(state=\"readonly\")\n inst.entry3.configure(state=\"active\")\n inst.entry3.delete(0,tk.END)\n inst.excel_sheet_name.set(None)\n inst.entry3_sheet_name.delete(0,tk.END)\n inst.entry4_username.delete(0,tk.END)\n inst.entry4_password.delete(0,tk.END)\n inst.entry4_database.delete(0,tk.END)\n inst.entry4_table.delete(0,tk.END)",
"def importEntry(self):\n\n dirname, entry_name = import_folder()\n\n # attempt to create a new entry (auto checks to close current entries)\n self.newEntry(default = False)\n\n # set name of entry\n self.entryName.setText(entry_name)\n\n # get each file and save it\n for folder in os.walk(dirname):\n for code_file in folder[2]:\n with open(folder[0] + '\\\\' + code_file) as f:\n new_text = f.read()\n self.addFileTab(fileName = code_file, fileText = new_text)\n\n\n # notify them that their directory structure has been preserved\n self.alert(\"Your directory structure has been preserved.\")\n\n pass",
"def import_catalog(self):\n file_name = QFileDialog.getOpenFileName(self, \"Open File\")\n if file_name[0]:\n self.current_file = file_name[0]\n file = open(file_name[0], \"r\")\n self.catalog = json.load(file)\n self.update_catalog()",
"def create_menu(subparsers: \"argparse._SubParsersAction\") -> None:\n dfetch.commands.command.Command.parser(subparsers, Import)",
"def on_import_clicked(self):\n dialog = QDialog(self.tab_widget, Qt.WindowTitleHint | Qt.WindowCloseButtonHint)\n dialog.setWindowTitle(\"Import Costs\")\n layout = QVBoxLayout(dialog)\n\n report_type_combo_box = QComboBox(dialog)\n report_type_combo_box.addItems(REPORT_TYPE_SWITCHER.keys())\n\n def import_costs():\n def wash_money(cost):\n cost = re.sub(r'[^\\d.]+', '', str(cost))\n\n return float(cost)\n\n try:\n file_path = choose_file(TSV_FILTER)\n if not file_path:\n return\n\n file = open(file_path, 'r', encoding=\"utf-8\", newline='')\n dict_reader = csv.DictReader(file, delimiter='\\t')\n\n report_type = report_type_combo_box.currentText()\n report_type_name = NAME_FIELD_SWITCHER[report_type]\n\n connection = ManageDB.create_connection(DATABASE_LOCATION)\n if connection is not None:\n all_values = []\n for row in dict_reader:\n if not row[report_type_name]: continue\n name = row[report_type_name]\n\n if not row[\"vendor\"]: continue\n vendor = row[\"vendor\"]\n\n if not row[\"start_year\"]: continue\n start_year = int(row[\"start_year\"])\n\n if not row[\"start_month\"]: continue\n start_month = int(row[\"start_month\"])\n\n if not row[\"end_year\"]: continue\n end_year = int(row[\"end_year\"])\n\n if not row[\"end_month\"]: continue\n end_month = int(row[\"end_month\"])\n\n if not row[\"original_currency\"]: continue\n original_currency = str(row[\"original_currency\"])\n\n if not row[\"cost_in_original_currency\"]: continue\n cost_in_original_currency = wash_money(row[\"cost_in_original_currency\"])\n\n if not row[\"cost_in_local_currency\"]: continue\n cost_in_local_currency = wash_money(row[\"cost_in_local_currency\"])\n\n if not row[\"cost_in_local_currency_with_tax\"]: continue\n cost_in_local_currency_with_tax = wash_money(row[\"cost_in_local_currency_with_tax\"])\n\n begin_date = QDate(start_year, start_month, 1)\n end_date = QDate(end_year, end_month, 1)\n if begin_date > end_date:\n continue\n\n values = self.get_insert_sql_values(\n begin_date, end_date, report_type, name, vendor, cost_in_original_currency,\n original_currency, cost_in_local_currency, cost_in_local_currency_with_tax)\n all_values += values\n\n sql_text, data = ManageDB.replace_costs_sql_text(report_type, tuple(all_values))\n ManageDB.run_sql(connection, sql_text, data, False)\n\n connection.close()\n file.close()\n ManageDB.backup_costs_data(report_type)\n self.update_costs()\n show_message('Import successful')\n dialog.close()\n\n except Exception as e:\n show_message(f\"File import failed: {e}\")\n\n button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, dialog)\n button_box.accepted.connect(import_costs)\n button_box.rejected.connect(lambda: dialog.close())\n\n layout.addWidget(report_type_combo_box)\n layout.addWidget(button_box)\n\n dialog.exec_()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests the API endpoint to query margin open orders
|
def test_margin_open_orders():
client = Client(key, secret)
response = client.margin_open_orders(**params)
response.should.equal(mock_item)
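The test relies on module-level fixtures (key, secret, params, mock_item) and on a fluent assertion library such as sure for response.should.equal(...), none of which are shown. A hedged sketch of such fixtures, with every value a placeholder rather than anything from the original suite, might be:

# Hypothetical fixtures assumed by the test above; all values are placeholders.
key = "test-api-key"
secret = "test-api-secret"
params = {"symbol": "BTCUSDT"}                # assumption: margin open orders queried per symbol
mock_item = {"orders": [], "success": True}   # assumption: whatever the mocked endpoint is set to return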
|
[
"def open_orders():\n return _make_request('orders/own', private=True)['orders']",
"def test_get_all_orders(self, client, auth_token):\n\n response = client.get(\n \"/api/v1/parcels\",\n headers=dict(Authorization=\"Bearer \" + auth_token))\n\n res_data = json.loads(response.get_data(as_text=True))\n assert response.status_code == 200\n assert 'OK' in res_data['status']\n assert 'pending delivery' in str(res_data['Parcels'])",
"def get_all_open_orders(self):\n orders = self.client.get_open_orders()\n print(orders)\n for x in range(len(orders)):\n if orders[x]['side'] == 'BUY':\n print('Buy {} price {}'.format(orders[x]['symbol'] ,orders[x]['price']))\n\n else:\n print('Sell {} price {}'.format(orders[x]['symbol'] ,orders[x]['price']))\n\n if orders == []:\n print('there is NO open order!!')\n return orders",
"def test_retrieve_orders(self):\n\n Pizza.objects.create(flavour='Vegan',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00})\n Pizza.objects.create(flavour='Dessert',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00})\n\n pizzas = Pizza.objects.all().order_by('-flavour')\n orders = create_orders(pizzas, self.user)\n\n res = self.client.get(ORDER_URL)\n\n serializer = OrderSerializer(orders, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_get_order_when_no_orders_in_order_list(self):\n list = []\n result = self.client.get(\n '/api/v1/orders/',\n content_type = 'application/json',\n data = json.dumps(list)\n )\n #tests\n self.assertEqual(result.status,'404 NOT FOUND')\n self.assertIn('no orders posted yet',str(result.data))",
"def test_retrieve_order_list(self):\n params = {\n 'name': 'SecondPr',\n 'description': 'Second product description',\n 'cost': 75\n }\n product_1 = sample_product()\n product_2 = sample_product(**params)\n\n sample_order(self.user)\n sample_order(self.user, product_1, product_2)\n\n res = self.client.get(ORDERS_URL)\n\n user_orders = Order.objects.filter(owner=self.user)\n serializer = OrderSerializer(user_orders, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def open_orders(self):\n return self.get_qs().filter(~Q(status='C'))",
"def test_get_order(client, db):\r\n rv = get_order(client, 1)\r\n\r\n assert rv.status_code == 200\r\n data = rv.get_json()\r\n assert data['order_number'] == 1",
"def test_query_orders_by_customer_id(self):\n orders = self._create_orders(10)\n test_customer_id = orders[0].customer_id\n customer_id_orders = [\n order for order in orders if order.customer_id == test_customer_id]\n resp = self.app.get(\n BASE_URL, query_string=\"customer_id={}\".format(test_customer_id)\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), len(customer_id_orders))\n # check the data just to be sure\n for orders in data:\n self.assertEqual(orders[\"customer_id\"], test_customer_id)",
"def test_mini_order_search(self):\n\n flag = \"mini\"\n api = \"mini.order.search\"\n order_sn_list = json.dumps(['SO_15309569711391310041', 'SO_15309573890039100463'])\n\n result = self.access_api(flag = flag, api = api, order_sn_list = order_sn_list)\n\n print(result[\"data_list\"])",
"def test_get_order_details_admin(self):\n CommonTestCases.admin_token_assert_count_equal(\n self,\n order_url + '/1558902658490',\n expected_response_get_order_details)",
"def test_get_specific_sale_order(self):\n self.client.post(\n '/v1/sales',\n data=json.dumps({\n 'id': 1,\n 'name': \"Watch\",\n 'quantity': 3,\n 'price': 45000\n }),\n content_type=\"application/json\"\n )\n response = self.client.get(\n '/v1/sales/1',\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 200)",
"def test_get_orders_view_only(client, db, sheet_id):\r\n with login_as_view_only(client):\r\n rv = get_orders(client, sheet_id)\r\n assert rv.status_code == 404\r\n\r\n with login_as_admin(client):\r\n rv = publish_planning(client, 1, sheet_id)\r\n assert rv.status_code == 200\r\n\r\n with login_as_view_only(client):\r\n rv = get_orders(client, sheet_id)\r\n assert rv.status_code == 200\r\n data = rv.get_json()['orders']\r\n assert len(data) > 5\r\n\r\n order_sheet = OrderSheet.query.get(\r\n sheet_id if sheet_id != 'latest' else 2)\r\n assert order_sheet.orders[0].order_number == data[0]['order_number']",
"def test_found_users_all_orders(self, client, auth_token):\n\n response = client.get(\n \"/api/v1/users/1/parcels\",\n headers=dict(Authorization=\"Bearer \" + auth_token),\n )\n res_data = json.loads(response.get_data(as_text=True))\n assert response.status_code == 200\n assert 'OK' in res_data['status']\n assert 'pending delivery' in str(res_data['Parcels'])",
"def test_listing_of_orders(self):\n Order.objects.create(item_name=\"Test item\", amount=\"633\", owner=self.user)\n res = self.client.get(self.orders_url)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data[0][\"item_name\"], \"an item\")",
"def test_order_can_be_added_to_list_and_fetched(self):\n self.list = []\n self.order = {'details': {\n 'id':3 ,\n 'dish': \"jgh\",\n 'description': \"description\",\n 'price': 34\n }}\n #update order (POST)\n result = self.client.post(\n '/api/v1/orders/',\n content_type = 'application/json',\n data=json.dumps(self.order)\n )\n self.list.append(self.order)\n self.assertEqual(result.status_code,201)\n self.assertIn(\"order added successfully\",str(result.data))\n\n #get order by its id (GET)\n result = self.client.get(\n '/api/v1/orders/25',\n content_type ='aplication/json',\n data = json.dumps(self.order)\n )\n self.assertEqual(result.status_code,200)\n self.assertIn('\"id\": 25',str(result.data))\n\n #try to get order by an id which doesnt exist (GET) id = 1000\n result = self.client.get(\n '/api/v1/orders/1000',\n content_type ='aplication/json',\n data = json.dumps(self.order)\n )\n self.assertEqual(result.status_code,400)\n self.assertIn('order id requested not found',str(result.data))",
"def test_get_order_list_admin(self):\n CommonTestCases.admin_token_assert_count_equal(\n self,\n order_url,\n expected_response_get_order_list)",
"def test_estimates_price_get(self):\n query_string = [('start_latitude', 1.2),\n ('start_longitude', 1.2),\n ('end_latitude', 1.2),\n ('end_longitude', 1.2)]\n response = self.client.open(\n '/v1/estimates/price',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_order_details(self):\n response = parcel_models.view_order_details(1)\n self.assertEquals(response['parcel_id'], 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generator that deserializes and provides casing objects. Doing it this way, instead of using fixtures, means we don't have to maintain the JSON; it will always work because it has access to the historic model.
|
def casing_codes():
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, '0006_load_casing_code.json'), 'r') as json_data:
data = json.load(json_data)
for item in data:
yield item
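As an illustrative, hedged consumer only: a Django data migration could feed these items into the historic model via apps.get_model. The app label, the model name, and the assumption that each yielded item maps directly onto model fields are all hypothetical here:

def load_casing_codes(apps, schema_editor):
    # Hypothetical forward function for a data migration; 'gwells' / 'CasingCode' are assumed names.
    CasingCode = apps.get_model('gwells', 'CasingCode')
    for item in casing_codes():
        CasingCode.objects.create(**item)  # assumes each dict's keys are model field names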
|
[
"def test_case_insensitive(self):\n\n @KeyLookup(graph_ci, \"a\", [\"b\"], idstruct_class=CIIDStruct)\n def load_document(doc_lst):\n for d in doc_lst:\n yield d\n\n # Test Case - upper case A in id\n doc_lst = [{\"_id\": \"A:1234\"}]\n res_lst = load_document(doc_lst)\n\n res = next(res_lst)\n self.assertEqual(res[\"_id\"], \"b:1234\")\n\n # Verify that the generator is out of documents\n with self.assertRaises(StopIteration):\n next(res_lst)",
"def get_certificate_generator(self):\n client = boto3.client('s3',\n aws_access_key_id='AKIAIPZZ2DOBQEVC6V6A',\n aws_secret_access_key='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'\n )\n resp = client.list_objects_v2(Bucket=BUCKET, Prefix='certis')\n for obj in resp['Contents']:\n alg = obj['Key']\n logging.info(\"DANNNY78------------------------------------------------ %s\", alg)\n if alg.find(\"json\") > 1:\n logging.info(\"DANNNY7------------------------------------------------ %s\",alg)\n result = client.get_object(Bucket=BUCKET, Key=alg)\n resultBytes=result[\"Body\"].read()\n resultText=resultBytes.decode()\n nnnnnnn = json.dumps(resultText)\n jsonJsonJson = json.loads(nnnnnnn)\n jsonJsonJsonxx = json.loads(jsonJsonJson)\n normalized = normalize_jsonld(jsonJsonJsonxx, detect_unmapped_fields=False)\n normalizedEncode=normalized.encode('utf-8')\n\n #dataToIssue=str(resultText).encode('utf-8')\n logging.info(\"DANNNY---------------------(34)----------------------------- %s\", normalizedEncode)\n #yield resultBytes\n yield normalizedEncode",
"def dictionary_generator(file):\n file.seek(0)\n for line in file:\n line = line.strip(\"\\n\")\n yield line\n if line != line.lower():\n yield line.lower()",
"def test_deserialize_a_category(self):\n data = {\"id\": 1, \"name\": \"Cat\"}\n category = Category()\n category.deserialize(data)\n self.assertNotEqual(category, None)\n self.assertEqual(category.id, None)\n self.assertEqual(category.name, \"Cat\")",
"def __iter__(self):\r\n for k, v in self._headers.getAllRawHeaders():\r\n yield k.lower()",
"def collate_iocfinder():\r\n with open(input_filepath + \"ioc-finder_results.json\", 'r') as fp:\r\n iocfinder_json = json.load(fp)\r\n return iocfinder_json",
"def __init__(self, *args, **kwargs):\r\n dict.__init__(self, *args, **kwargs)\r\n for key, value in self.items():\r\n key_upper = to_unicode(key).upper()\r\n if key != key_upper:\r\n dict.__delitem__(self, key)\r\n self[key_upper] = value",
"def test_get_to_lowercase(self):\n assert self.parser.clean(\n \"OpenClassrooms\"\n ) == \"openclassrooms\"",
"def references(self, env, object_name, model, assoc_class, \n result_class_name, role, result_role, keys_only):\n if object_name.classname.lower() == 'pyfoo':\n model['ThePyFoo'] = object_name\n for k, v in _PyFooComps.items():\n if v == object_name['FooKey']:\n model['TheComp'] = pywbem.CIMInstanceName(classname='PyFooComponent',\n namespace=object_name.namespace, keybindings={'TheKey':k})\n yield model\n elif object_name.classname.lower() == 'pyfoocomponent':\n model['TheComp'] = object_name\n try:\n model['ThePyFoo'] = pywbem.CIMInstanceName(classname='PyFoo',\n namespace=object_name.namespace,\n keybindings={'FooKey':_PyFooComps[object_name['TheKey']]})\n yield model\n except KeyError:\n pass\n else:\n raise pywbem.CIMError(pywbem.CIM_ERR_FAILED, '')",
"def dataGenerator():\n\tfor current in data:\n\t\tauthor = current[\"author\"]\n\t\ttext = current[\"text\"]\n\t\tyield {\"id\": author, \"content\": {\"title\": author, \"text\": text}}",
"def test_recipe_from_json():\n orig_recipe = Recipe(\"Tuna pasta\", ingreds)\n new_recipe = Recipe.from_json(orig_recipe.to_json())\n assert new_recipe.name == orig_recipe.name\n assert new_recipe.ingreds == orig_recipe.ingreds",
"def setUp(self):\n\n self.customers = dict()\n self.customers[\"james_bowen\"] = Customer.objects.create(\n name='James Bowen')\n self.customers[\"amanda-arias\"] = Customer.objects.create(\n name='Amanda Arias')\n self.customers[\"beau-jeppesen\"] = Customer.objects.create(\n name='Beau Jeppesen')",
"def fake_vacancies_data(faker):\n def gen_vacancies(sources_count=1, vacancies_count=3):\n vacancies_data = []\n for s in range(sources_count):\n source_name = faker.company()\n for v in range(vacancies_count):\n vacancies_data.append({\n 'source': faker.uri(),\n 'source_name': source_name[:16],\n 'name': faker.job()\n })\n return vacancies_data\n return gen_vacancies",
"def translate(input_path, translator = None, exclusive = False, print_debug = False):\n\n with open(input_path, 'r') as file:\n if translator is None:\n for line in file:\n yield json.loads(line)\n else:\n for line in file:\n current = json.loads(line)\n yield translate_item(current, translator=translator, exclusive=exclusive, print_debug=print_debug)",
"def testDontConvertLowerToUpperCaseIfNotSpecified(self):\n data = \"\\n\".join([\">id1\", \"actgs\"])\n with patch.object(builtins, \"open\", mock_open(read_data=data)):\n reads = list(FastaReads(\"filename.fasta\", readClass=AARead))\n self.assertEqual([AARead(\"id1\", \"actgs\")], reads)",
"def _generate_collections(self):\n for collection in self.__collections:\n lo_coll = collection.lower()\n self.__setattr__(\n \"create_\" + lo_coll, self._generate_create(collection)\n )\n self.__setattr__(\n \"read_\" + lo_coll, self._generate_read(collection)\n )\n self.__setattr__(\n \"update_\" + lo_coll, self._generate_update(collection)\n )\n self.__setattr__(\n \"delete_\" + lo_coll, self._generate_delete(collection)\n )\n self.__setattr__(\n f\"read_all_{lo_coll}s\", self._generate_read_all(collection)\n )\n self.__setattr__(\n f\"search_{lo_coll}s\", self._generate_search(collection)\n )\n self.__setattr__(\n collection, self.__getattribute__(\"read_\" + lo_coll)\n )\n self.__setattr__(\n collection + \"s\",\n self.__getattribute__(\"read_all_\" + lo_coll + \"s\")\n )",
"def camelcase_dict(obj):\n u2c = underscore_to_camelcase\n return {u2c(key): value for (key, value) in obj.iteritems()}",
"def testUpperCaseSequencesOnly(self):\n reads = list(combineReads(None, [\"id acgt\"], upperCase=True))\n self.assertEqual([Read(\"id\", \"ACGT\")], reads)",
"def test_init_models(self):\n deterministic_models = dict(inspect.getmembers(pyross.deterministic,\n inspect.isclass))\n for name, model in deterministic_models.items():\n if name.startswith('S'):\n m = model(self.parameters, self.M, self.N)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract setup.py content as a string from the downloaded tar
|
def _extract_setup_content(package_file, name):
    tar_file = tarfile.open(fileobj=package_file)
    setup_candidates = [elem for elem in tar_file.getmembers() if 'setup.py' in elem.name]
    if len(setup_candidates) >= 1:
        # pick the setup.py closest to the archive root, i.e. the shortest member path
        candidate_names = [elem.name for elem in setup_candidates]
        setup_member = min(candidate_names, key=len)
        content = tar_file.extractfile(setup_member).read()
        return content
    else:
        print("No setup.py candidate found in tar for package: %s" % (name,))
        return ''
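A minimal usage sketch, assuming import tarfile at module level and a package archive that has already been downloaded to disk; the file path and package name below are hypothetical:

import io

# Hypothetical call site: wrap a downloaded sdist in a binary file-like object and extract its setup.py.
with open('/tmp/example-package-1.0.tar.gz', 'rb') as fh:
    package_file = io.BytesIO(fh.read())
setup_source = _extract_setup_content(package_file, 'example-package')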
|
[
"def load_setup_py_file(self, pr_ref: str):\n repo = self.get_repo()\n response = repo.get_contents(\"setup.py\", ref=pr_ref)\n self._setup_py = str(response.decoded_content, \"utf-8\")",
"def _setup_body(setup_conf: SETUP_CONFIG) -> str:\n return os.linesep.join([\n 'import sys',\n 'from setuptools import setup',\n '',\n \"args = ' '.join(sys.argv).strip()\",\n 'if not any(args.endswith(suffix) for suffix in [{allowed_suffixes}]):',\n ' raise {error}',\n '',\n 'setup(',\n ' {config}',\n ')',\n ''\n ]).format(\n error=repr(ImportError(setup_conf['description'])),\n config=',{linesep} '.join([\n '{}={}'.format(key, repr(value))\n for key, value\n in sorted(setup_conf.items(), key=lambda item: item[0])\n ]).format(linesep=os.linesep),\n allowed_suffixes=', '.join(repr(each) for each in sorted(ALLOWED_SETUP_SUFFIXES))\n )",
"def extract_pkginfo(self, dest_dir=THIRDPARTY_DIR):\n\n fn = self.filename\n if fn.endswith(\".whl\"):\n fmt = \"zip\"\n elif fn.endswith(\".tar.gz\"):\n fmt = \"gztar\"\n else:\n fmt = None\n\n dist = os.path.join(dest_dir, fn)\n with tempfile.TemporaryDirectory(prefix=f\"pypi-tmp-extract-{fn}\") as td:\n shutil.unpack_archive(filename=dist, extract_dir=td, format=fmt)\n # NOTE: we only care about the first one found in the dist\n # which may not be 100% right\n for pi in fileutils.resource_iter(location=td, with_dirs=False):\n if pi.endswith(\n (\n \"PKG-INFO\",\n \"METADATA\",\n )\n ):\n with open(pi) as fi:\n return fi.read()",
"def _get_extras_from_setup(\n package_root: str,\n setup_py: str = \"setup.py\",\n extra: str = \"all\",\n include_base: bool = False,\n) -> List[str]:\n setup_py = str(Path(package_root) / setup_py)\n\n setup_txt = None\n with open(setup_py, \"+r\") as f_handle:\n setup_txt = f_handle.read()\n\n srch_txt = \"setuptools.setup(\"\n repl_txt = [\n \"def fake_setup(*args, **kwargs):\",\n \" pass\",\n \"\",\n \"fake_setup(\",\n ]\n setup_txt = setup_txt.replace(srch_txt, \"\\n\".join(repl_txt))\n\n neut_setup_py = Path(package_root) / \"neut_setup.py\"\n try:\n with open(neut_setup_py, \"+w\") as f_handle:\n f_handle.writelines(setup_txt)\n\n setup_mod = import_module(\"neut_setup\")\n extras = getattr(setup_mod, \"EXTRAS\").get(extra)\n if include_base:\n base_install = getattr(setup_mod, \"INSTALL_REQUIRES\")\n extras.extend(\n [req.strip() for req in base_install if not req.strip().startswith(\"#\")]\n )\n return sorted(list(set(extras)), key=str.casefold)\n finally:\n neut_setup_py.unlink()",
"def get_a_tarfile():\n pytest_enable_socket()\n\n \n recipe = CreateRecipe(\n \"\"\"\n trial-hg38-gaps-ucsc-v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: []\n package:\n name: trial-hg38-gaps-ucsc-v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg38\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: hg38 Assembly gaps from USCS\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: 11-Mar-2019\n data-provider: UCSC\n file-type: \n - bed\n final-files: \n - trial-hg38-gaps-ucsc-v1.bed.gz\n - trial-hg38-gaps-ucsc-v1.bed.gz.tbi\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n\n genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg38/hg38.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg38/database/gap.txt.gz \\\\\n | gzip -dc \\\\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\\\n | gsort /dev/stdin $genome \\\\\n | bgzip -c > gaps.bed.gz\n\n tabix gaps.bed.gz \n \n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-ucsc-v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-ucsc-v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-ucsc-v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"trial-hg38-gaps-ucsc-v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_trial-hg38-gaps-ucsc-v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! 
-z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_trial-hg38-gaps-ucsc-v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_trial-hg38-gaps-ucsc-v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! -z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n\n ## Test a normal run of _build()\n recipe_dir_path = recipe.recipe_dirs[\"trial-hg38-gaps-ucsc-v1\"] \n ## Get yaml file\n yaml_file = yaml.safe_load(open(os.path.join(recipe_dir_path, \"meta.yaml\")))\n tarball_file_path = check_recipe._build(recipe_dir_path,yaml_file)\n\n return(tarball_file_path)",
"def build():\n shell(\"python setup.py sdist\")",
"def archive_file_contents(tar, info):\n f = tar.extractfile(info)\n return f.read()",
"def _make_spec_file(self):\n # Note that bdist_rpm can be an old style class.\n if issubclass(BdistRPMCommand, object):\n spec_file = super(BdistRPMCommand, self)._make_spec_file()\n else:\n spec_file = bdist_rpm._make_spec_file(self)\n\n python_package = 'python3'\n\n description = []\n requires = ''\n summary = ''\n in_description = False\n\n python_spec_file = []\n for line in iter(spec_file):\n if line.startswith('Summary: '):\n summary = line[9:]\n\n elif line.startswith('BuildRequires: '):\n line = (f'BuildRequires: {python_package:s}-setuptools, '\n f'{python_package:s}-devel')\n\n elif line.startswith('Requires: '):\n requires = line[10:]\n continue\n\n elif line.startswith('%description'):\n in_description = True\n\n elif line.startswith('python setup.py build'):\n if python_package == 'python3':\n line = '%py3_build'\n else:\n line = '%py2_build'\n\n elif line.startswith('python setup.py install'):\n if python_package == 'python3':\n line = '%py3_install'\n else:\n line = '%py2_install'\n\n elif line.startswith('%files'):\n lines = [\n '%files -n %{name}-data',\n '%defattr(644,root,root,755)',\n '%license LICENSE',\n '%doc ACKNOWLEDGEMENTS AUTHORS README',\n '%{_datadir}/%{name}/*',\n '',\n f'%files -n {python_package:s}-%{{name}}',\n '%defattr(644,root,root,755)',\n '%license LICENSE',\n '%doc ACKNOWLEDGEMENTS AUTHORS README']\n\n lines.extend([\n '%{python3_sitelib}/plaso/*.py',\n '%{python3_sitelib}/plaso/*/*.py',\n '%{python3_sitelib}/plaso/*/*.yaml',\n '%{python3_sitelib}/plaso/*/*/*.py',\n '%{python3_sitelib}/plaso/*/*/*.yaml',\n '%{python3_sitelib}/plaso*.egg-info/*',\n '',\n '%exclude %{_prefix}/share/doc/*',\n '%exclude %{python3_sitelib}/plaso/__pycache__/*',\n '%exclude %{python3_sitelib}/plaso/*/__pycache__/*',\n '%exclude %{python3_sitelib}/plaso/*/*/__pycache__/*'])\n\n python_spec_file.extend(lines)\n break\n\n elif line.startswith('%prep'):\n in_description = False\n\n python_spec_file.extend([\n '%package -n %{name}-data',\n f'Summary: Data files for {summary:s}',\n '',\n '%description -n %{name}-data'])\n\n python_spec_file.extend(description)\n\n python_spec_file.append(f'%package -n {python_package:s}-%{{name}}')\n python_summary = f'Python 3 module of {summary:s}'\n\n python_spec_file.extend([\n f'Requires: plaso-data >= %{{version}} {requires:s}',\n f'Summary: {python_summary:s}',\n '',\n f'%description -n {python_package:s}-%{{name}}'])\n\n python_spec_file.extend(description)\n\n python_spec_file.extend([\n '%package -n %{name}-tools',\n f'Requires: {python_package:s}-plaso >= %{{version}}',\n f'Summary: Tools for {summary:s}',\n '',\n '%description -n %{name}-tools'])\n\n python_spec_file.extend(description)\n\n elif in_description:\n # Ignore leading white lines in the description.\n if not description and not line:\n continue\n\n description.append(line)\n\n python_spec_file.append(line)\n\n python_spec_file.extend([\n '',\n '%files -n %{name}-tools',\n '%{_bindir}/*.py'])\n\n return python_spec_file",
"def read_setup(car, setup, track, extension=\"ini\"):\n content = None\n content_path = \"{}/{}/{}/{}.{}\".format(setup_dir(), car, track, setup, extension)\n if os.path.isfile(content_path):\n with open(content_path, \"r\") as content_file:\n content = content_file.read()\n return content",
"def get_package_name():\n\n # getting git repo top level\n project_root = get_generated_project_top_level()\n get_name_cmd = \"cd %s \" \\\n \" && cat setup.py | grep 'setup(name=\\\"'\" \\\n % project_root\n\n name = os.popen(get_name_cmd).read().strip(\"setup(name=\")\n name = name.strip().strip(',').strip('\"')\n\n if name == \"\":\n print(Fore.RED + \"Error getting package name: %s (%s) 😢\"\n % (name, get_name_cmd)\n + Style.RESET_ALL)\n\n exit(1)\n\n return name",
"def pkg_file(tmpdir):\n local_file = tmpdir.join('mypackage_1.0-1.deb')\n try:\n local_file.write_binary(b'testpackagecontents')\n except AttributeError:\n # python-py < v1.4.24 does not support write_binary()\n local_file.write('testpackagecontents')\n return local_file",
"def pkg_info():\n try:\n doc = __doc__.decode(\"UTF-8\")\n except (AttributeError, UnicodeError):\n doc = __doc__ # Python3, or some strangeness\n\n return dict(\n # project data & layout\n name = __name__.split('.')[0],\n ## TODO: version = re.search(r\"(?<=\\()[^)]+(?=\\))\", changelog).group(),\n package_dir = {\"\": \"src\"},\n ## TODO: packages = find_packages(projectdir / \"src\", exclude=[\"tests\"]),\n test_suite = \"nose.collector\",\n zip_safe = True,\n include_package_data = True,\n data_files = [\n (\"EGG-INFO\", [\n \"README.md\", \"LICENSE\", \"debian/changelog\",\n ]),\n ],\n entry_points = {\n \"console_scripts\": [\n \"wand = neutrino_wand.cli:run\",\n ],\n },\n\n # dependency management\n install_requires = [\n ],\n setup_requires = [\n \"docutils\",\n \"Sphinx\",\n ],\n extras_require = {\n },\n\n # PyPI\n url = \"https://github.com/jhermann/neutrino-wand\",\n license = \"Apache License Version 2.0\",\n keywords = \"python tool monitoring influxdb devops reporting visualops\",\n author = u\"Jürgen Hermann\",\n author_email = \"jh@web.de\",\n description = doc.split('.')[0].strip(),\n long_description = doc.split('.', 1)[1].strip(),\n classifiers = [\n # values at http://pypi.python.org/pypi?:action=list_classifiers\n \"Development Status :: 3 - Alpha\",\n #\"Development Status :: 4 - Beta\",\n #\"Development Status :: 5 - Production/Stable\",\n \"Operating System :: OS Independent\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Documentation\",\n \"Topic :: Utilities\",\n ],\n )",
"def import_info_file():\n from pydoas import _LIBDIR\n return join(_LIBDIR, join(\"data\", \"import_info.txt\"))",
"def get_installed_sources():",
"def get_buildfile_manifest(spec):\n data = {\n \"text_to_relocate\": [],\n \"binary_to_relocate\": [],\n \"link_to_relocate\": [],\n \"other\": [],\n \"binary_to_relocate_fullpath\": [],\n }\n\n exclude_list = (\".spack\", \"man\")\n\n # Do this at during tarball creation to save time when tarball unpacked.\n # Used by make_package_relative to determine binaries to change.\n for root, dirs, files in os.walk(spec.prefix, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude_list]\n\n # Directories may need to be relocated too.\n for directory in dirs:\n dir_path_name = os.path.join(root, directory)\n rel_path_name = os.path.relpath(dir_path_name, spec.prefix)\n if os.path.islink(dir_path_name):\n link = os.readlink(dir_path_name)\n if os.path.isabs(link) and link.startswith(spack.store.layout.root):\n data[\"link_to_relocate\"].append(rel_path_name)\n\n for filename in files:\n path_name = os.path.join(root, filename)\n m_type, m_subtype = fsys.mime_type(path_name)\n rel_path_name = os.path.relpath(path_name, spec.prefix)\n added = False\n\n if os.path.islink(path_name):\n link = os.readlink(path_name)\n if os.path.isabs(link):\n # Relocate absolute links into the spack tree\n if link.startswith(spack.store.layout.root):\n data[\"link_to_relocate\"].append(rel_path_name)\n added = True\n\n if relocate.needs_binary_relocation(m_type, m_subtype):\n if (\n (\n m_subtype in (\"x-executable\", \"x-sharedlib\", \"x-pie-executable\")\n and sys.platform != \"darwin\"\n )\n or (m_subtype in (\"x-mach-binary\") and sys.platform == \"darwin\")\n or (not filename.endswith(\".o\"))\n ):\n data[\"binary_to_relocate\"].append(rel_path_name)\n data[\"binary_to_relocate_fullpath\"].append(path_name)\n added = True\n\n if relocate.needs_text_relocation(m_type, m_subtype):\n data[\"text_to_relocate\"].append(rel_path_name)\n added = True\n\n if not added:\n data[\"other\"].append(path_name)\n return data",
"def extract_dpkg(dpkg):\n\n dir = os.path.splitext(dpkg)[0]\n logging.info(\"Extracting Debian pkg in dir: \" + dir)\n result = subprocess.call([\"dpkg-source\", \"--extract\", dpkg, dir],\n stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)\n if result != 0:\n logging.info('Error while extracting package for {}'.format(dpkg))\n exit()\n return dir",
"def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.split(\" = \", 1)\n version = version.replace(\"'\", \"\").strip()\n print(version)\n return version",
"def trap_setup():\n sys.path.append(os.path.abspath(os.curdir))\n trapper = SetupTrapper()\n setuptools.setup = trapper\n __import__(\"setup\",globals(),locals(),[],-1)\n return trapper.get_data()",
"def get_package_info(srcdir='.', exclude=()):\n ext_modules = []\n packages = []\n package_dir = {}\n\n # Read in existing package data, and add to it below\n setup_cfg = os.path.join(srcdir, 'setup.cfg')\n if os.path.exists(setup_cfg):\n conf = read_configuration(setup_cfg)\n if 'options' in conf and 'package_data' in conf['options']:\n package_data = conf['options']['package_data']\n else:\n package_data = {}\n else:\n package_data = {}\n\n if exclude:\n warnings.warn(\n \"Use of the exclude parameter is no longer supported since it does \"\n \"not work as expected. Use add_exclude_packages instead. Note that \"\n \"it must be called prior to any other calls from setup helpers.\",\n AstropyDeprecationWarning)\n\n # Use the find_packages tool to locate all packages and modules\n packages = find_packages(srcdir, exclude=exclude)\n\n # Update package_dir if the package lies in a subdirectory\n if srcdir != '.':\n package_dir[''] = srcdir\n\n # For each of the setup_package.py modules, extract any\n # information that is needed to install them. The build options\n # are extracted first, so that their values will be available in\n # subsequent calls to `get_extensions`, etc.\n for setuppkg in iter_setup_packages(srcdir, packages):\n if hasattr(setuppkg, 'get_build_options'):\n options = setuppkg.get_build_options()\n for option in options:\n add_command_option('build', *option)\n if hasattr(setuppkg, 'get_external_libraries'):\n libraries = setuppkg.get_external_libraries()\n for library in libraries:\n add_external_library(library)\n\n for setuppkg in iter_setup_packages(srcdir, packages):\n # get_extensions must include any Cython extensions by their .pyx\n # filename.\n if hasattr(setuppkg, 'get_extensions'):\n ext_modules.extend(setuppkg.get_extensions())\n if hasattr(setuppkg, 'get_package_data'):\n package_data.update(setuppkg.get_package_data())\n\n # Locate any .pyx files not already specified, and add their extensions in.\n # The default include dirs include numpy to facilitate numerical work.\n ext_modules.extend(get_cython_extensions(srcdir, packages, ext_modules,\n ['numpy']))\n\n # Now remove extensions that have the special name 'skip_cython', as they\n # exist Only to indicate that the cython extensions shouldn't be built\n for i, ext in reversed(list(enumerate(ext_modules))):\n if ext.name == 'skip_cython':\n del ext_modules[i]\n\n # On Microsoft compilers, we need to pass the '/MANIFEST'\n # commandline argument. This was the default on MSVC 9.0, but is\n # now required on MSVC 10.0, but it doesn't seem to hurt to add\n # it unconditionally.\n if get_compiler_option() == 'msvc':\n for ext in ext_modules:\n ext.extra_link_args.append('/MANIFEST')\n\n return {\n 'ext_modules': ext_modules,\n 'packages': packages,\n 'package_dir': package_dir,\n 'package_data': package_data,\n }",
"def get_deb_details(debpath):\n return os.popen('dpkg-deb -f %s' % debpath).read()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Instantiates and returns the metrics defined in the configuration dictionary. All arguments are expected to be handed in through the configuration via a dictionary named 'params'.
|
def create_metrics(config):
return thelper.train.utils.create_consumers(config)
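As a hedged illustration only: the description above states that constructor arguments travel in a nested 'params' dictionary, so a configuration handed to this helper might be shaped as below. The metric name, the 'type' key, and the fully-qualified class path are assumptions about thelper's consumer schema, not a confirmed API:

# Hypothetical configuration shape; only the nested 'params' dict is stated by the description above.
config = {
    "top1-accuracy": {
        "type": "thelper.optim.metrics.Accuracy",  # assumed class path
        "params": {"top_k": 1},                    # constructor arguments, per the description
    },
}
metrics = create_metrics(config)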
|
[
"def __init__(self, metrics_params=[]):\n Config._process_metrics(metrics_params)\n self._metrics = metrics_params",
"def generate_metrics(self):\n metrics = []\n if \"metrics\" not in self._settings or not isinstance(self._settings[\"metrics\"], dict):\n return metrics\n\n for method, args in self._settings['metrics'].items():\n args = {} if args is None else args\n metrics += [build_metric(method, args)]\n return metrics",
"def generate(cls, **kwargs) -> \"PrometheusConfiguration\":\n return cls(\n **{**dict(\n description=\"Update the base_url and metrics to match your Prometheus configuration\",\n metrics=[\n PrometheusMetric(\n \"throughput\",\n servo.Unit.requests_per_second,\n query=\"rate(http_requests_total[5m])\",\n absent=AbsentMetricPolicy.ignore,\n step=\"1m\",\n ),\n PrometheusMetric(\n \"error_rate\",\n servo.Unit.percentage,\n query=\"rate(errors[5m])\",\n absent=AbsentMetricPolicy.ignore,\n step=\"1m\",\n ),\n ],\n ), **kwargs}\n )",
"def as_metric_config(self):\n return tfma.config.MetricConfig(\n class_name=self.class_name,\n module=self.module_name,\n config=json.dumps(self.config) if self.config else None)",
"def from_config(cls, config: Dict[str, Any]) -> \"ClassyMeter\":\n raise NotImplementedError",
"def factory(cls):\n parser = argparse.ArgumentParser(description=u'Consume metrics from Panoptes and send them to InfluxDB')\n\n parser.add_argument(u'--config',\n help=u'Configuration file to use for the consumer. Default: {}'.format(DEFAULT_CONFIG_FILE),\n default=DEFAULT_CONFIG_FILE)\n try:\n # Using parse_known_args is a hack to get the tests to work under nose\n # https://stackoverflow.com/questions/28976912/how-to-use-nosetests-in-python-while-also-passing\n # -accepting-arguments-for-argpar\n args = parser.parse_known_args()\n except Exception as e:\n sys.exit(u'Error parsing command line options or configuration file: {}'.format(repr(e)))\n\n try:\n return cls(args[0].config)\n except Exception as e:\n sys.exit(u'Error trying to instantiate class: {}'.format(repr(e)))",
"def pre_metrics_init(init_f):\n def wrapper(self, **kwargs):\n # Add params from base class\n self.append_params(UserMetric)\n self.assign_attributes(kwargs, 'init')\n\n # Call init\n init_f(self, **kwargs)\n\n return wrapper",
"def get_metrics_class(user_config_dict, verbose):\n metrics_class_name = user_config_dict['utils_input']['metrics_function']\n if metrics_class_name == 'None':\n metrics_class_name = 'Basic_metric_params'\n utils_filename = os.path.join(os.path.dirname(btk.__file__),\n 'utils.py')\n else:\n utils_filename = user_config_dict['utils_filename']\n utils = imp.load_source(\"\", utils_filename)\n metrics_class = getattr(utils, metrics_class_name)\n if verbose:\n print(f\"Measurement class set as {metrics_class_name} defined in \"\n f\"{utils_filename}\")\n return metrics_class",
"def get_metric_func(self):",
"def initialise_metrics(self) -> None:\n self.metrics = {\"aircraft\": {}, \"stats\": {}} # type: ignore\n\n # aircraft\n d = self.metrics[\"aircraft\"]\n for (name, label, doc) in Specs[\"aircraft\"]: # type: ignore\n d[name] = self._create_gauge_metric(label, doc)\n\n # statistics\n for group, metrics_specs in Specs[\"stats\"].items(): # type: ignore\n d = self.metrics[\"stats\"].setdefault(group, {})\n for name, label, doc in metrics_specs:\n d[name] = self._create_gauge_metric(label, doc)",
"def test_get_metrics(self):\n pass",
"def __init__(self, connection, name, sample_rate=1, tags=None):\n Metric.__init__(self, connection, name, sample_rate=sample_rate,\n tags=tags)",
"def get_metrics(self):\n return dict()",
"def get_measurement(self, params=None):\r\n\r\n extract_param = None\r\n if params is None:\r\n params = (\r\n 'freq_hz',\r\n 'period_sec',\r\n 'positive_width_sec',\r\n 'negative_width_sec',\r\n 'duty_cycle')\r\n elif isinstance(params, str):\r\n extract_param = params\r\n params = (params,)\r\n\r\n def read_frequency():\r\n \"\"\"Reads the current measurement frequency.\"\"\"\r\n try:\r\n gate_time = int(self.send('RCG'))\r\n except ValueError:\r\n raise InvalidGateTimeError('RCG returned an unrecognized gate time.')\r\n return float(self.send('RCF')) / (10.0 ** gate_time)\r\n\r\n getters = {\r\n 'freq_hz': read_frequency,\r\n 'counter': lambda: int(self.send('RCC')),\r\n 'period_sec': lambda: float(self.send('RCT')) / 1000000000.0,\r\n 'positive_width_sec': lambda: float(\r\n self.send('RC+')) / 1000000000.0,\r\n 'negative_width_sec': lambda: float(\r\n self.send('RC-')) / 1000000000.0,\r\n 'duty_cycle': lambda: float(self.send('RCD')) / 1000.0,\r\n }\r\n\r\n results = {}\r\n\r\n for param in params:\r\n if param not in getters:\r\n raise UnknownParameterError(\r\n 'Unknown parameter: %s. Valid parameters are %s' %\r\n (param, ', '.join(sorted(getters))))\r\n results[param] = getters[param]()\r\n\r\n if extract_param:\r\n return results[extract_param]\r\n\r\n return results",
"def init_prom_metrics():\n for e in MetricName:\n base_args = {'name': e.name.lower(),\n 'documentation': e.name,\n 'labelnames': supported_label_names(e)}\n metric_type = unit_to_metric_type(e)\n if metric_type == Counter:\n prom_counters[e] = Counter(**base_args)\n elif metric_type == Histogram:\n prom_histograms[e] = Histogram(**base_args, buckets=buckets_by_unit(e.unit))",
"def _define_params(self, version):\n\n if version >= [0, 90, 5]:\n # ES versions 0.90.5 and above\n additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5\n else:\n # ES version 0.90.4 and below\n additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5\n\n self.METRICS.update(additional_metrics)\n \n if version <= [0, 90, 0]:\n # ES version 0.90.9 and below\n self.HEALTH_URL = \"/_cluster/health?pretty=true\"\n self.STATS_URL = \"/_cluster/nodes/stats?all=true\"\n self.NODES_URL = \"/_cluster/nodes?network=true\"\n\n additional_metrics = self.JVM_METRICS_PRE_0_90_10\n\n if version < [5, 0, 0]:\n # ES versions 0.90.10 and above\n # Metrics architecture changed starting with version 0.90.10\n self.HEALTH_URL = \"/_cluster/health?pretty=true\"\n self.STATS_URL = \"/_nodes/stats?all=true\"\n self.NODES_URL = \"/_nodes?network=true\"\n self.TASK_URL = \"/_cluster/pending_tasks?pretty=true\"\n\n additional_metrics = self.JVM_METRICS_POST_0_90_10\n\n else:\n # ES versions 5.0.0 and above\n # Metrics architecture changed starting with version 0.90.10\n self.HEALTH_URL = \"/_cluster/health?pretty=true\"\n self.STATS_URL = \"/_nodes/stats\"\n self.NODES_URL = \"/_nodes?network=true\"\n self.TASK_URL = \"/_cluster/pending_tasks?pretty=true\"\n\n additional_metrics = self.JVM_METRICS_POST_0_90_10\n\n self.METRICS.update(additional_metrics)\n\n if version >= [1, 0, 0]:\n self.METRICS.update(self.ADDITIONAL_METRICS_POST_1_0_0)\n\n if version < [2, 0, 0]:\n self.METRICS.update(self.ADDITIONAL_METRICS_PRE_2_0)\n if version >= [0, 90, 5]:\n self.METRICS.update(self.ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)\n if version >= [1, 0, 0]:\n self.METRICS.update(self.ADDITIONAL_METRICS_1_x)\n\n if version >= [1, 3, 0]:\n self.METRICS.update(self.ADDITIONAL_METRICS_POST_1_3_0)\n\n if version >= [1, 4, 0]:\n # ES versions 1.4 and above\n additional_metrics = self.ADDITIONAL_METRICS_POST_1_4_0\n self.METRICS.update(additional_metrics)\n\n # Version specific stats metrics about the primary shards\n additional_metrics = self.PRIMARY_SHARD_METRICS\n self.METRICS.update(additional_metrics)\n\n if version >= [1, 0, 0]:\n additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0\n self.METRICS.update(additional_metrics)",
"def test_metrics(self):\n expected = self.base_expected()\n expected[\"data\"][\"config\"] = self.base_config()\n expected[\"data\"][\"config\"].update({\"metrics\": True})\n\n parms = argparse.Namespace(name=expected[\"metadata\"][\"name\"],\n metrics=True)\n conf = configs.ClusterConfig(parms)\n raw = templates.CMTemplate(conf).dumps()\n observed = json.loads(raw)\n observed[\"data\"][\"config\"] = yaml.load(observed[\"data\"][\"config\"],\n Loader=yaml.FullLoader)\n self.assertDictEqual(observed, expected)",
"def from_config(cls, config: Dict[str, Any]) -> \"PrecisionAtKMeter\":\n return cls(topk=config[\"topk\"])",
"def build_config(self):\n config = agent_config.Plugins()\n for metric in System.system_metrics:\n try:\n with open(os.path.join(self.template_dir, 'conf.d/' + metric + '.yaml'), 'r') as metric_template:\n default_config = yaml.load(metric_template.read())\n config[metric] = default_config\n if self.args:\n for arg in self.args:\n config[metric]['instances'][0][arg] = self.literal_eval(self.args[arg])\n log.info('\\tConfigured {0}'.format(metric))\n except (OSError, IOError):\n log.info('\\tUnable to configure {0}'.format(metric))\n continue\n\n return config"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterates through a list of strings to find 4 unique strings.
|
def find_4_unique_strings(w, h, list_of_strings):
    # Exhaustively try every ordered selection of four distinct strings from the list.
    for i in range(len(list_of_strings)):
        down2 = list_of_strings[i]
        for j in range(len(list_of_strings)):
            across2 = list_of_strings[j]
            if across2 != down2:
                for k in range(len(list_of_strings)):
                    down1 = list_of_strings[k]
                    if down1 != down2 and down1 != across2:
                        for m in range(len(list_of_strings)):
                            across1 = list_of_strings[m]
                            if across1 != down2 and across1 != across2 and across1 != down1:
                                # if 4 unique strings are found, call best_fit function
                                if best_fit(w, h, across1, down1, across2, down2):
                                    print('Solved It!')
                                    print(w * h)
                                    return w * h
                                else:
                                    print('continuing')
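For comparison, the same search reads more directly with itertools.permutations; this is only a sketch, reusing the best_fit(w, h, across1, down1, across2, down2) helper assumed above:

from itertools import permutations

def find_4_unique_strings_alt(w, h, list_of_strings):
    # permutations(..., 4) yields every ordered pick of four positions in the list;
    # the set-size check mirrors the original value-based inequality tests in case
    # the input contains duplicate strings.
    for across1, down1, across2, down2 in permutations(list_of_strings, 4):
        if len({across1, down1, across2, down2}) < 4:
            continue
        if best_fit(w, h, across1, down1, across2, down2):
            print('Solved It!')
            print(w * h)
            return w * h
        print('continuing')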
|
[
"def uniq(strings):\n return list(set(strings))",
"def unique_set(data: List[str]) -> List[str]:\n # TODO: Add the source code for method f7",
"def number_of_unique_strings(*arg):\n\n master_list = []\n for _list in arg:\n master_list = master_list + _list\n\n return len(set(master_list))",
"def is_unique_3(string):\n prev = None\n for char in sorted(string):\n if prev == char:\n return False\n prev = char\n return True",
"def anagram(main_str, str_list):\n return [_str for _str in str_list if str_list and Counter(_str) == Counter(main_str)]",
"def generate_unique_terms(words_list):\n\n\tunique_terms = []\n\tfor w in words_list:\n\t\tif w not in unique_terms:\n\t\t\tunique_terms.append(w)\n\treturn unique_terms",
"def select_unique_combs(linestrings):\n\n # create spatial index\n with ignore_shapely2_warnings():\n tree_idx = STRtree(linestrings)\n # get index of linestrings intersecting each linestring\n idx_match = get_matches(linestrings, tree_idx)\n\n # make combinations of unique possibilities\n combs = []\n for idx_comb in idx_match:\n combs.extend(list(itertools.product(*idx_comb)))\n combs = np.array(combs)\n combs.sort(axis=1)\n combs = select_unique(combs)\n\n uniq_line_combs = combs[(np.diff(combs, axis=1) != 0).flatten()]\n\n return uniq_line_combs, tree_idx",
"def lessThan4(aList):\n lessThan4List = []\n for word in aList:\n if len(word) < 4:\n lessThan4List.append(word)\n return lessThan4List",
"def GetUniqueElements(DataBase,FragmentSize):\n \n Container=[]\n counter=0\n \n for val in DataBase:\n \n try:\n newList=set(SplitString(str(val.seq),FragmentSize))\n except AttributeError:\n newList=set(SplitString(val,FragmentSize))\n \n if counter%250==0:\n Container=list(np.unique(Container))\n \n Container=Container+list(newList)\n \n return np.unique(Container)",
"def _get_unique_problem_numbers_from_list(problemStringList: list) -> list:\n uniqueProblemSet = set()\n for problemString in problemStringList:\n parsedProblemList = _get_problem_numbers_from_string(problemString)\n uniqueProblemSet = uniqueProblemSet.union(set(parsedProblemList))\n\n return list(uniqueProblemSet)",
"def unique_names_check(name_list: Optional[List[str]]):\n if name_list is None:\n return\n\n # Name uniqueness checks\n names = set()\n for name in name_list:\n if name in names:\n logging.warning(\n \"Name resolution has found more than one data loader having the same name !\\n\"\n \"In such cases, logs will nor be properly generated. \"\n \"Please rename the item to have unique names.\\n\"\n f\"Resolved name : {name}\"\n )\n else:\n names.add(name) # we need just hash key check, value is just a placeholder",
"def uniq_words(file, min_len):\n\twords=set()\t\n\tfor line in file:\n\t\tfor word in line.lower().split():\n\t\t\tgood=(re.sub('[^a-zA-Z0-9]', '', word))\n\t\t\tif len(good)==0:\n\t\t\t\tcontinue\n\t\t\tif len(good)>=min_len:\n\t\t\t\twords.add(good)\n\n\tlogging.debug('{}'.format(words))\n\n\treturn words",
"def all_substrings(strings, minlength=30):\n result = set()\n for s in strings:\n result |= substrings(s, minlength)\n # \"|=\" is the set union operator\n return result",
"def test_make_unique_chain_names(self):\n # check single letter names\n unique_chain_names = {'A','B','AA','+'}\n new_names = nu.make_unique_chain_names(unique_chain_names,3)\n self.assertEqual(new_names,['C', 'D', 'E'])\n # Test double letter names\n unique_chain_names = set(string.ascii_uppercase)\n unique_chain_names.update(set(string.ascii_lowercase))\n new_names = nu.make_unique_chain_names(unique_chain_names,5)\n self.assertEqual(new_names,['AA', 'AB', 'AC', 'BA', 'BB'])",
"def unique_streets(street_list):\n if len(street_list) > 10: # avoid accidental N^2 with large inputs.\n return []\n if len(street_list) < 2:\n return street_list\n ok_list = [street_list[0]]\n for street in street_list[1:]:\n is_ok = True\n for other in ok_list:\n if are_streets_same(street, other):\n is_ok = False\n break\n if is_ok:\n ok_list.append(street)\n return ok_list",
"def list_every_unique_recipe_tag():\n data = rs.get_recipes_from_file()\n tag_list = []\n for recipe in data:\n for tag in recipe['tags']:\n if tag.lower() not in tag_list:\n tag_list.append(tag.lower())\n tag_list.sort()\n print len(tag_list), tag_list\n return True",
"def getUniqueItems(self, data: list):\n keys = set()\n for group in data:\n keys = keys.union(group)\n return keys",
"def anagrams(list_of_str , string):\n \n occurrences_string = get_occurrences(string)\n \n for element in list_of_str:\n \n if get_occurrences(element) != occurrences_string:\n return False\n \n return True",
"def get_unique_name(prefix, lst):\n i = 1\n while True:\n name = prefix + \" \" + str(i)\n\n if i >= 100:\n break\n\n elif name not in lst:\n return name\n\n else:\n i += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filters out datasets that we can't use since they are either lacking a release date or an original price. For rendering the output we also require the name and abbreviation of the platform.
|
def is_valid_dataset(platform):
    if 'release_date' not in platform or not platform['release_date']:
        logging.warning(u"{0} has no release date".format(platform['name']))
        return False
    if 'original_price' not in platform or not platform['original_price']:
        logging.warning(u"{0} has no original price".format(platform['name']))
        return False
    if 'name' not in platform or not platform['name']:
        logging.warning(u"No platform name found for given dataset")
        return False
    if 'abbreviation' not in platform or not platform['abbreviation']:
        logging.warning(u"{0} has no abbreviation".format(platform['name']))
        return False
    return True
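
# Minimal usage sketch (hypothetical platform dicts; assumes `import logging` at module level):
#   nes = {'name': 'NES', 'abbreviation': 'NES',
#          'release_date': '1983-07-15', 'original_price': 179.0}
#   is_valid_dataset(nes)              # -> True
#   is_valid_dataset({'name': 'Ouya'}) # -> False (missing fields are logged as warnings)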
|
[
"def _datasets_line(args):\n filter_ = args['filter'] if args['filter'] else '*'\n return _render_list([str(dataset) for dataset in datalab.bigquery.Datasets(args['project'])\n if fnmatch.fnmatch(str(dataset), filter_)])",
"def test_no_deprecated_datasets(self):\n result = self.study_version_3.get_new_sourcedatasets()\n for dataset in self.datasets_v1:\n self.assertNotIn(dataset, result)\n for dataset in self.datasets_v2:\n self.assertNotIn(dataset, result)",
"def filterDatasetType(self):\n dd_type = self.filterDatasetTypeCB.currentText()\n if dd_type != '':\n selection = self.catalog\n filtered = []\n\n for dataset in selection:\n if dataset['dataset_type'] == dd_type:\n filtered.append(dataset)\n\n self.results = filtered\n\n else:\n self.results = self.catalog\n\n return",
"def check_software_file(df, software):\n\n if software == \"MaxQuant\":\n expected_columns = [\"Protein IDs\", \"Reverse\", \"Potential contaminant\"]\n if (set(expected_columns).issubset(set(df.columns.to_list()))) == False:\n st.error(\n \"This is not a valid MaxQuant file. Please check:\"\n \"http://www.coxdocs.org/doku.php?id=maxquant:table:proteingrouptable\"\n )\n\n elif software == \"AlphaPept\":\n if \"object\" in df.iloc[:, 1:].dtypes.to_list():\n st.error(\"This is not a valid AlphaPept file.\")\n\n elif software == \"DIANN\":\n expected_columns = [\n \"Protein.Group\",\n ]\n\n if (set(expected_columns).issubset(set(df.columns.to_list()))) == False:\n st.error(\"This is not a valid DIA-NN file.\")\n\n elif software == \"Spectronaut\":\n expected_columns = [\n \"PG.ProteinGroups\",\n ]\n\n if (set(expected_columns).issubset(set(df.columns.to_list()))) == False:\n st.error(\"This is not a valid Spectronaut file.\")\n\n elif software == \"FragPipe\":\n expected_columns = [\"Protein\"]\n if (set(expected_columns).issubset(set(df.columns.to_list()))) == False:\n st.error(\n \"This is not a valid FragPipe file. Please check:\"\n \"https://fragpipe.nesvilab.org/docs/tutorial_fragpipe_outputs.html#combined_proteintsv\"\n )",
"def find_duplicate_platforms(platform_name=None):\n BLOG_PLATFORMS = ['Blogspot', 'Wordpress', 'Custom', 'Tumblr']\n SOCIAL_PLATFORMS = ['Facebook', 'Pinterest', 'Twitter', 'Instagram']\n PLATFORMS = BLOG_PLATFORMS + SOCIAL_PLATFORMS if not platform_name else [platform_name]\n infs1 = Influencer.objects.filter(source='spreadsheet_import', blog_url__isnull=False)\n infs2 = Influencer.objects.filter(shelf_user__userprofile__is_trendsetter=True)\n infs = infs1 | infs2\n\n for i,inf in enumerate(infs):\n print \"Checking %d %s\" % (i, inf)\n plats = inf.platforms()\n for pname in PLATFORMS:\n pp = plats.filter(platform_name=pname)\n if pp.count() == 0:\n print \"no platform found for %s\" % pname\n if pp.count() > 1:\n print \"Duplicate found for %s\" % pname\n for p in pp:\n print p, p.url_not_found\n print \"\\n-----------\\n\"",
"def test_no_updated_datasets(self):\n result = self.study_version_3.get_new_sourcedatasets()\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, result)",
"def landsat_qa_clean_mask(dataset, platform):\n processing_options = {\n \"LANDSAT_5\": ls5_unpack_qa,\n \"LANDSAT_7\": ls7_unpack_qa,\n \"LANDSAT_8\": ls8_unpack_qa\n }\n \n #Clean mask creation to filter out pixels that are not suitable for analysis\n clear_xarray = processing_options[platform](dataset.pixel_qa, \"clear\") \n water_xarray = processing_options[platform](dataset.pixel_qa, \"water\")\n \n #use logical or statement to elect viable pixels for analysis\n return np.logical_or(clear_xarray.values.astype(bool), water_xarray.values.astype(bool))",
"def _exclusions(self):\n\n ls_cond_intol = []\n ls_cond_al = []\n ls_cond_hab = []\n\n cognito_handler = awsapi.Cognito()\n data_sets = cognito_handler.get_records_as_dict(dataset=c.DATASET_VITAL, cognito_id=self.cognitoId)\n\n # Da in data_set nicht immer 'tolerances' durch die App hinterlegt ist (wie eigentlich abgesprochen)\n # prüf ich mit \".get()\", ob der Eintrag hinterlegt ist. Eine Else Bedingung für den Fall das gar nichts\n # hinterlegt ist fehlte. Für diesen Fall sollen weder \"entlaktosifizierte\" Gerichte, noch \"entglutenisierte\"\n # Gerichte vorkommen.\n if data_sets.get('intolerances'):\n for element in data_sets['intolerances']:\n try:\n ls_cond_intol.append(mealdescription.c.__getattr__(element) == None)\n except:\n pass\n if not 'IN_LAKT' in data_sets['intolerances']:\n ls_cond_intol.append(mealdescription.c.DE_LAKT == None)\n if not 'IN_GLU' in data_sets['intolerances']:\n ls_cond_intol.append(mealdescription.c.DE_GLU == None)\n else:\n ls_cond_intol.append(mealdescription.c.DE_LAKT == None)\n ls_cond_intol.append(mealdescription.c.DE_GLU == None)\n\n if data_sets.get('allergies'):\n for element in data_sets['allergies']:\n try:\n ls_cond_al.append(mealdescription.c.__getattr__(element) == None)\n except:\n pass\n\n if data_sets.get('habits'):\n for element in data_sets['habits']:\n if element == 'VEGAN':\n element = 'VEGGIE'\n ls_cond_hab.append(mealdescription.c.__getattr__(element) == True)\n\n\n\n\n\n print data_sets\n\n return ls_cond_al + ls_cond_intol + ls_cond_hab",
"def _filter_bogus_ds(self, devtree):\n type_filter = [\"1\", \"16\", \"254\"]\n return [ds for ds in devtree if ds[\"desc_id\"] not in type_filter]",
"def test_datasets(clean_raw_data):\n dataset_names = datasets(clean_raw_data['spectrum1'])\n assert dataset_names == ['FMO_176487', 'FMO_276487', 'FMO_176715', 'FMO_276715']",
"def __check_datasets__(self):\n datasets = (\"iocage\", \"iocage/download\", \"iocage/images\",\n \"iocage/jails\", \"iocage/log\", \"iocage/releases\",\n \"iocage/templates\")\n\n for dataset in datasets:\n zfs_dataset_name = f\"{self.pool}/{dataset}\"\n try:\n ds = Dataset(zfs_dataset_name, cache=False)\n\n if not ds.exists:\n raise ZFSException(-1, 'Dataset does not exist')\n elif not ds.path:\n iocage_lib.ioc_common.logit({\n \"level\": \"EXCEPTION\",\n \"message\": f'Please set a mountpoint on {ds.name}'\n },\n _callback=self.callback)\n except ZFSException:\n # Doesn't exist\n\n if os.geteuid() != 0:\n raise RuntimeError(\"Run as root to create missing\"\n \" datasets!\")\n\n iocage_lib.ioc_common.logit({\n \"level\": \"INFO\",\n \"message\": f\"Creating {self.pool}/{dataset}\"\n },\n _callback=self.callback,\n silent=self.silent)\n\n dataset_options = {\n \"compression\": \"lz4\",\n \"aclmode\": \"passthrough\",\n \"aclinherit\": \"passthrough\"\n }\n\n with DATASET_CREATION_LOCK:\n ds = Dataset(zfs_dataset_name, cache=False)\n if not ds.exists:\n ds.create({'properties': dataset_options})\n\n prop = ds.properties.get(\"exec\")\n if prop != \"on\":\n iocage_lib.ioc_common.logit({\n \"level\": \"EXCEPTION\",\n \"message\": f\"Dataset \\\"{dataset}\\\" has \"\n f\"exec={prop} (should be on)\"\n },\n _callback=self.callback)",
"def main() -> None:\n command_line_args: Namespace = parse_command_line_args()\n start_time: DateTime = DateTime.now()\n if not command_line_args.envs:\n command_line_args.envs = ENVS\n envs_summary = \", \".join(command_line_args.envs)\n date: Date = command_line_args.date\n plan_year: str = command_line_args.plan_year\n\n if not plan_year:\n raise Exception(\"Missing required argument --plan-year\")\n\n # Create untracked datasets if --create-untracked-datasets is specified\n if command_line_args.create_untracked_datasets is True:\n logger.info(\"Not updating datasets because --create-untracked-datasets was specified\")\n if command_line_args.only_file:\n logger.info(\"Ignoring --only-file because --create-untracked-datasets was specified\")\n logger.info(f\"Creating untracked datasets using data from {date}, envs: {envs_summary}\")\n for env in command_line_args.envs:\n logger.info(f\"Loading env: {env}\")\n loader = Loader(env)\n loader.create_all_datasets(plan_year, only_untracked=True)\n # Otherwise, just update all datasets (default behavior)\n else:\n logger.info(f\"Updating datasets for {date}, envs: {envs_summary}\")\n for env in command_line_args.envs:\n logger.info(f\"Loading env: {env}\")\n loader = Loader(env)\n loader.update_all_datasets(plan_year, date, only_file=command_line_args.only_file)\n\n time_elapsed: Duration = DateTime.now() - start_time\n logger.info(f\"Finished! Time elapsed: {time_elapsed.in_words()}\")",
"def isAddOn(self):\n return self.dataset != CONFIG['default_dataset']",
"def generate_plot(platforms, output_file):\n labels = []\n values = []\n for platform in platforms:\n name = platform['name']\n adapted_price = platform['adjusted_price']\n price = platform['original_price']\n if price > 2000:\n continue #i.e. skip\n if len(name)>15:\n name=platform['abbreviation']\n #This needs to be changed in the demo\n labels.insert(0,u\"{0}\\n$ {1}\\n$ {2}\".format(name, price, round(adapted_price,2)))\n values.insert(0, adapted_price)\n\n #define the size of the bar and size of the graph \n width = 0.3\n ind = np.arange(len(values))\n fig = plt.figure(figsize=(len(labels) * 1.8, 10))\n\n ax = fig.add_subplot(1, 1, 1)\n ax.bar(ind, values, width, align='center')\n\n # Format the X and Y axis labels. Also set the ticks on the x-axis slightly\n # farther apart and give then a slight tilting effect.\n plt.ylabel('Adjusted price')\n plt.xlabel('Year / Console')\n ax.set_xticks(ind + 0.3)\n ax.set_xticklabels(labels)\n fig.autofmt_xdate()\n plt.grid(True)\n\n #plt.show(dpi=72) \n #uncomment if you want to save the file\n plt.savefig(output_file, dpi=72)",
"def separate_software_packages(df):\n\n # Original colname is really long, so using this to shorten it\n colname = 'Question 11: Please provide the name(s) of the main research software you use.'\n\n # Things to replace with a comma\n char_replacees = [';', '\\n', 'and', r',+']\n\n # Things to replace with a space\n regex_replacees = [r'\\(.*\\)', r'\\\".*\\\"', r'^https?:\\/\\/.*[\\r\\n]*']\n\n for current in char_replacees:\n df[colname] = df[colname].str.replace(current,',') \n print('replacing') \n\n for current in regex_replacees:\n df[colname] = df[colname].str.replace(current,'')\n\n # Store the location of the parsable strings (i.e. the ones with commas). It adds a True or False dependent\n # on whether it finds a comma in the appropriate row\n df['Q11_valid_data'] = df['Question 11: Please provide the name(s) of the main research software you use.'].str.contains(',')\n\n # Add to the parsable strings by also storing the location of any one-word strings (which are likely to be the name of a single software package). If it's already populated, you use the already populated data.\n df['Q11_valid_data'] = (df['Q11_valid_data']) | (df['Question 11: Please provide the name(s) of the main research software you use.'].str.count(' ') == 0)\n\n # Anything that's not in 'Q11_valid_data' is likely to be too difficult to parse, so kill it all!\n df.loc[df['Q11_valid_data'] == False, 'Question 11: Please provide the name(s) of the main research software you use.'] = np.nan\n\n print('The dataframe contains the following number of parsable rows:')\n print(df['Q11_valid_data'].value_counts())\n \n return",
"def test_get_platforms_usage(self):\n pass",
"def data_test(self):\n\n print('** OIL DATA')\n for x, y in zip(self.oil_data_amt, self.oil_data_date):\n print('\\nAmt: ' + x)\n print('Date: ' + y)\n try:\n oil_approval = raw_input('\\nIs data ok? y/n ')\n except NameError:\n oil_approval = input('\\nIs data ok? y/n ')\n\n print('\\n** EARTHQUAKE DATA')\n for x, y in zip(self.earth_data_date, self.earth_data_mag):\n print('\\nMag: ' + str(y))\n print('Date: ' + str(x))\n try:\n earthquake_approval = raw_input('\\nIs data ok? y/n ')\n except NameError:\n earthquake_approval = input('\\nIs data ok? y/n ')\n\n if earthquake_approval == 'y' and oil_approval == 'y':\n self.can_vis = True\n print('** READY FOR VISUALIZATION')\n else:\n print('** ERROR: Data not ready for vis')",
"def _check_datasets(self, datasets):\n if not isinstance(datasets, Datasets):\n datasets = Datasets(datasets)\n\n return datasets",
"def check_if_dataset_name_is_valid(name):\n available_datasets_list = fetch_list_datasets()\n assert name in available_datasets_list, 'Invalid dataset name: {}'.format(name)",
"def test_intermediate_version_no_new_current_datasets(self):\n new_dataset_v3 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_3)\n result = self.study_version_2.get_new_sourcedatasets()\n self.assertNotIn(new_dataset_v3, result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a bar chart from the given platforms and saves it as a PNG.
|
def generate_plot(platforms, output_file):
labels = []
values = []
for platform in platforms:
name = platform['name']
adapted_price = platform['adjusted_price']
price = platform['original_price']
if price > 2000:
continue #i.e. skip
if len(name)>15:
name=platform['abbreviation']
#This needs to be changed in the demo
labels.insert(0,u"{0}\n$ {1}\n$ {2}".format(name, price, round(adapted_price,2)))
values.insert(0, adapted_price)
#define the size of the bar and size of the graph
width = 0.3
ind = np.arange(len(values))
fig = plt.figure(figsize=(len(labels) * 1.8, 10))
ax = fig.add_subplot(1, 1, 1)
ax.bar(ind, values, width, align='center')
    # Format the X and Y axis labels. Also set the ticks on the x-axis slightly
    # farther apart and give them a slight tilting effect.
plt.ylabel('Adjusted price')
plt.xlabel('Year / Console')
ax.set_xticks(ind + 0.3)
ax.set_xticklabels(labels)
fig.autofmt_xdate()
plt.grid(True)
    #plt.show(dpi=72)
    #uncomment plt.show() above to display the chart interactively; the figure is saved below
plt.savefig(output_file, dpi=72)
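
# Minimal usage sketch (hypothetical data; assumes `import numpy as np` and
# `import matplotlib.pyplot as plt` at module level):
#   platforms = [{'name': 'NES', 'abbreviation': 'NES',
#                 'original_price': 179.0, 'adjusted_price': 438.0}]
#   generate_plot(platforms, 'console_prices.png')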
|
[
"def barchart(kmer_vectors: dict) -> None:\n for genome_name in kmer_vectors:\n cur_v = kmer_vectors[genome_name]\n dataset = list()\n for item in cur_v:\n dataset.append(cur_v.get(item))\n a = np.array(dataset)\n base_labels = [item for item in cur_v]\n y_pos = np.arange(len(base_labels))\n\n plt.bar(y_pos, a, align='center', alpha=0.5)\n plt.xticks(y_pos, base_labels)\n plt.ylabel(\"normalised frequency\")\n plt.xlabel(\"k-mer\")\n plt.title(genome_name)\n\n out_dir = \"/home/oisin/programs/cs318/318assignment/analysis/kmer_analysis/histograms\"\n plt.savefig(f\"{out_dir}/{genome_name}_hist.png\")\n plt.close()",
"def pd_bar_chart(drop_list, by_list, all_models_df, out_png_name):\n\n ### drop superfluous cols\n ### find best model for each data set\n bar_plot_df = all_models_df.drop(drop_list, axis=1)\n bar_plot_df = bar_plot_df.sort_values(by=by_list\\\n , ascending=[True, False])\\\n .groupby('Data Name').head(1)\n\n ### count number of times model type is best model\n bar_plot_df['Count'] = 0\n bar_plot_df = bar_plot_df.drop(by_list, axis=1)\\\n .groupby('Model Name', as_index=False)\\\n .count()\n\n ### generate plot\n ### uniform color for each model in all plots\n bar_plot = bar_plot_df.plot.bar(x='', y='Count',\\\n color=['r', 'b', 'g'], legend=False)\n bar_plot.set_ylabel('Count')\n fig = bar_plot.get_figure()\n fig.savefig(out_png_name)",
"def make_projects_chart(self):\n df, _ = reportutils.dataframe_from_json(self.data_path)\n reportutils.plot_project_bar(df)",
"def svg_generate_bar_chart(self, *elements):\n bar_height = 100\n label_height = 80\n length_factor = 4\n overall_height = bar_height + label_height\n overall_width = 100 * length_factor\n\n svg = self.svg_create_element(overall_height, overall_width)\n\n sum_all_elements = sum([length for unused, length in elements])\n\n current_pos = 0\n bar_group = document.createElementNS(self.svg_namespace, 'g')\n bar_group.setAttribute('id', 'bar_group')\n bar_group.setAttribute('stroke', 'black')\n bar_group.setAttribute('stroke-width', 2)\n\n nr_processed_elements = 0\n for title, length in elements:\n rect_len = int(100 * length / sum_all_elements) * length_factor\n\n if not rect_len:\n continue\n\n colour = self.svg_colours[nr_processed_elements % len(self.svg_colours)]\n\n rect = self.svg_create_rect(current_pos, 0, rect_len, bar_height, colour)\n bar_group.appendChild(rect)\n\n label_group = document.createElementNS(self.svg_namespace, 'g')\n label_group.setAttribute('id', title)\n colour_rect = self.svg_create_rect(0, 0, 20, 20, colour)\n colour_rect.setAttribute('stroke', 'black')\n colour_rect.setAttribute('stroke-width', 2)\n\n text = document.createElementNS(self.svg_namespace, 'text')\n text.setAttribute('x', '30')\n text.setAttribute('y', '18')\n text.textContent = title\n\n label_group.appendChild(colour_rect)\n label_group.appendChild(text)\n\n # TODO replace hardcoded values\n x = 5 + 125 * (nr_processed_elements // 2)\n y = bar_height + 10 + (nr_processed_elements % 2) * 40\n label_group.setAttribute('transform', 'translate({}, {})'.format(x, y))\n\n bar_group.appendChild(label_group)\n\n current_pos += rect_len\n nr_processed_elements += 1\n\n svg.appendChild(bar_group)\n\n return svg",
"def draw_bars(pointer,datafile):\n pointer.home()\n #Count the observations in the file\n obs=count_observations(datafile)\n t=1\n #loop over the data file to draw the bars on the chart corresponding to the values of the feature\n for i in range(obs):\n #Select random color for current bar to be drawn\n color=choose_color()\n #Draw rectangle at specified position on the chart, with height proportionate to data value of the current feature value\n draw_rectangle(pointer,i*math.floor((WIDTH-100)/obs)+40,0,40,int(datafile[t]),color)\n t=t+3",
"def make_k_bar_graph(save=False, savetype=\".pdf\", show=False,\n print_analysis=True):\n names = [r\"$y$-adv.\", r\"$z$-adv.\", r\"$y$-turb.\", r\"$z$-turb.\",\n r\"$k$-prod.\", r\"Mean diss. $(\\times 10^3)$\"]\n plt.figure(figsize=(7.5, 3.2))\n cm = plt.cm.coolwarm\n for n, U in enumerate([0.4, 0.6, 0.8, 1.0, 1.2]):\n Re_D = U*D/nu\n wm = WakeMap(U)\n tty, ttz = wm.mean_k_turb_trans_y, wm.mean_k_turb_trans_z\n kprod, meandiss = wm.k_prod, wm.mean_diss\n dKdy, dKdz = wm.dKdy, wm.dKdz\n y_R, z_H = wm.y_R, wm.z_H\n meanu, meanv, meanw = wm.df.mean_u, wm.df.mean_v, wm.df.mean_w\n quantities = [ts.average_over_area(-2*meanv/meanu*dKdy/(0.5*U**2)*D, y_R, z_H),\n ts.average_over_area(-2*meanw/meanu*dKdz/(0.5*U**2)*D, y_R, z_H),\n ts.average_over_area(2*tty/meanu/(0.5*U**2)*D, y_R, z_H),\n ts.average_over_area(2*ttz/meanu/(0.5*U**2)*D, y_R, z_H),\n ts.average_over_area(2*kprod/meanu/(0.5*U**2)*D, y_R, z_H),\n ts.average_over_area(2*meandiss/meanu/(0.5*U**2)*D*1e3, y_R, z_H)]\n ax = plt.gca()\n color = cm(int(n/4*256))\n ax.bar(np.arange(len(names))+n*0.15, quantities, color=color, edgecolor=\"black\",\n hatch=None, width=0.15,\n label=r\"$Re_D={:.1f}\\times 10^6$\".format(Re_D/1e6))\n if print_analysis:\n quantities[-1] /= 1e3\n print(\"K recovery rate at {:.1f} m/s: {:.2f} (%/D)\".format(U,\n np.sum(quantities)*100))\n ax.set_xticks(np.arange(len(names)) + 5*.15/2)\n ax.set_xticklabels(names)\n plt.hlines(0, 0, len(names), color=\"black\")\n plt.ylabel(r\"$\\frac{K \\, \\mathrm{ transport}}{UK_\\infty D^{-1}}$\")\n plt.legend(loc=\"upper right\", ncol=2)\n plt.tight_layout()\n if save:\n plt.savefig(\"Figures/K_trans_bar_graph\" + savetype)\n if show:\n plt.show()",
"def create_bar_chart(data):\n top_vehicles = sorted(data, key=lambda x: x['total_sales'], reverse=True)[:10]\n vehicle_names = [format_car(vehicle['car']) for vehicle in top_vehicles]\n total_revenues = [locale.atof(vehicle['price'].strip(\"$\")) * vehicle['total_sales'] for vehicle in top_vehicles]\n\n drawing = Drawing(width=500, height=300)\n bar_chart = HorizontalBarChart()\n bar_chart.x = 50\n bar_chart.y = 50\n bar_chart.width = 400\n bar_chart.height = 200\n bar_chart.data = [total_revenues]\n bar_chart.categoryAxis.categoryNames = vehicle_names\n drawing.add(bar_chart)\n\n return drawing",
"def plot_bar_chart(resultset, desc, filename, plotdir):\n fig = plt.figure()\n if 'title' in desc:\n plt.title(desc['title'])\n _, ax1 = plt.subplots()\n plt.grid(b=True, which='major', color='k', axis='y', linestyle=':')\n # Set axis below bars\n ax1.set_axisbelow(True)\n if 'xlabel' in desc:\n plt.xlabel(desc['xlabel'])\n if 'ylabel' in desc:\n plt.ylabel(desc['ylabel'])\n if 'filter' not in desc or desc['filter'] is None:\n desc['filter'] = {}\n plot_empty = desc['plotempty'] if 'plotempty' in desc else True\n\n ymetrics = desc['ymetrics']\n ycondnames = desc['ycondnames'] if 'ycondnames' in desc else None\n ycondvals = desc['ycondvals'] if 'ycondvals' in desc else None\n if ycondnames is not None and ycondvals is not None:\n if not len(ymetrics) == len(ycondnames) == len(ycondvals):\n raise ValueError('ymetrics, ycondnames and ycondvals must have the same length')\n # yvals is basically the list of values that differentiate each bar\n # it is used for legends and styles mainly\n yvals = ycondvals if len(set(ymetrics)) == 1 else zip(ymetrics, ycondvals)\n else:\n yvals = ymetrics\n placement = desc['placement'] if 'placement' in desc else 'grouped'\n if placement == 'grouped':\n placement = [1 for _ in range(len(yvals))]\n elif placement == 'stacked':\n placement = [len(yvals)]\n else:\n if sum(placement) != len(yvals):\n raise ValueError('Placement definition incorrect. '\n 'The sum of values of the list must be equal to '\n 'the number of y values')\n xticks = desc['xticks'] if 'xticks' in desc else desc['xvals']\n empty = True\n # Spacing attributes\n # width of a group of bars\n group_width = desc['group_width'] if 'group_width' in desc else 0.4\n width = group_width / len(placement) # width of a single bar\n separation = width / 2 # space between adjacent groups\n border = 0.6 * separation # left and right borders\n\n elem = collections.defaultdict(int) # bar objects (for legend)\n # Select colors and hatches\n if 'bar_color' in desc and all(y in desc['bar_color'] for y in yvals):\n color = desc['bar_color']\n elif len(yvals) <= len(BW_COLOR_CATALOGUE):\n color = dict((y, BW_COLOR_CATALOGUE[yvals.index(y)]) for y in yvals)\n else:\n color = collections.defaultdict(lambda: None)\n if 'bar_hatch' in desc and desc['bar_hatch'] is None:\n hatch = collections.defaultdict(lambda: None)\n elif 'bar_hatch' in desc and all(y in desc['bar_hatch'] for y in yvals):\n hatch = desc['bar_hatch']\n elif len(yvals) <= len(BW_COLOR_CATALOGUE):\n hatch = dict((y, HATCH_CATALOGUE[yvals.index(y)]) for y in yvals)\n else:\n hatch = collections.defaultdict(lambda: None)\n # Plot bars\n left = border # left-most point of the bar about to draw\n for i in range(len(desc['xvals'])):\n l = 0\n for x in placement:\n bottom = 0 # Bottom point of a bar. 
It is alway 0 if stacked is False\n for y in range(x):\n condition = Tree(desc['filter'])\n condition.setval(desc['xparam'], desc['xvals'][i])\n if ycondnames is not None:\n condition.setval(ycondnames[l], ycondvals[l])\n data = [v.getval(ymetrics[l])\n for _, v in resultset.filter(condition)\n if v.getval(ymetrics[l]) is not None]\n confidence = desc['confidence'] if 'confidence' in desc else 0.95\n meanval, err = means_confidence_interval(data, confidence)\n yerr = None if 'errorbar' in desc and not desc['errorbar'] else err\n if not np.isnan(meanval):\n empty = False\n elem[yvals[l]] = plt.bar(left, meanval, width,\n color=color[yvals[l]],\n yerr=yerr, bottom=bottom, ecolor='k',\n hatch=hatch[yvals[l]], label=yvals[l])\n bottom += meanval\n l += 1\n left += width\n left += separation\n if empty and not plot_empty:\n return\n n_bars = len(placement)\n plt.xticks(border + 0.5 * (n_bars * width) +\n (separation + n_bars * width) * np.arange(len(xticks)),\n xticks)\n if 'legend' in desc:\n legend = [desc['legend'][l] for l in yvals] if 'legend'in desc else yvals\n legend_args = desc['legend_args'] if 'legend_args' in desc else {}\n if 'legend_loc' in desc:\n legend_args['loc'] = desc['legend_loc']\n plt.legend([elem[x] for x in yvals], legend,\n prop={'size': LEGEND_SIZE},\n **legend_args)\n xmin, _ = plt.xlim()\n plt.xlim(xmin, left - separation + border)\n if 'ymax' in desc:\n plt.ylim(ymax=desc['ymax'])\n plt.savefig(os.path.join(plotdir, filename), bbox_inches='tight')\n plt.close(fig)",
"def bar_chart(values, xticks, title, xlabel, ylabel, barColor='b', barAlpha=1):\n if len(values)!=len(xticks):\n print 'Error: debe haber tantos grupos como etiquetas de barras.'\n return\n #Draw graph\n ind=np.arange(len(values))\n width=0.5\n p1 = plt.bar(ind, values, width, color=barColor, alpha=barAlpha)\n #Draw labels and titles\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n plt.xticks(ind+width/2., xticks ,rotation='horizontal')\n #Show\n plt.show()\n return p1",
"def drawBars (bars, window):\n\tfor index, bar in enumerate(bars): \n\t\tdrawBar (window, index, len(bars), bar, LBLUE)",
"def barGraph(subfolder, combinedData, labels):\n\tplt.clf()\n\tdf = pd.DataFrame.from_dict(combinedData)\n\tlabel_dict = dict(enumerate(labels))\n\tdf['y'] = df['y'].map(label_dict)\n\tfig = plt.figure(figsize=(10,11))\n\tdf.groupby('y').x_str.count().plot.bar(ylim=0)\n\tplt.title(\"All Data (Train & Test Sets) by Category \", fontsize='xx-large', pad=40)\n\tplt.ylabel(\"Qty Tweets\", fontsize='large', labelpad=20)\n\tplt.xlabel(\"Category\", fontsize='large', labelpad=20)\n\tplt.savefig(subfolder + \"/category_distribution.png\")",
"def house_bar_chart(results):\n colours = [PARTY_COLOURS[party] for party in results.index]\n plt.bar(results.index, results.values, color=colours)\n\n plt.ylim(0, 180)\n\n for i, v in enumerate(results.values):\n plt.text(i - 0.18, v + 10, str(v), color=colours[i])\n\n plt.show()",
"def init_bar_plot():\n _fig, axis = plt.subplots()\n axis.set_title('Stacked bar plot histogram (BGR)')\n axis.set_xlabel('Bin')\n axis.set_ylabel('Frequency (num of pixels)')\n axis.set_ylim(0, 700000)\n\n _n, _bins, bars_blue = axis.hist(0, const.BINS, rwidth=const.BAR_WIDTH,\n histtype='bar', stacked=True, color='blue', label='Blue')\n _n, _bins, bars_green = axis.hist(0, const.BINS, rwidth=const.BAR_WIDTH,\n histtype='bar', stacked=True, color='green', label='Green')\n _n, _bins, bars_red = axis.hist(0, const.BINS, rwidth=const.BAR_WIDTH,\n histtype='bar', stacked=True, color='red', label='Red')\n\n axis.legend()\n\n return bars_blue, bars_green, bars_red",
"def generate_plot(\n names: List[str],\n scores: List[float],\n output_dir: str,\n title: str = \"Game scores\",\n bands: Dict = None,\n x_offset: float = 0.1,\n dpi: int = 150,\n palette: str = \"Blues\",\n):\n _, ax = plt.subplots(1, 1, figsize=(10, 6), dpi=dpi)\n sns.barplot(\n x=names, y=scores, palette=_get_palette_from_values(palette, scores), ax=ax\n )\n if bands:\n _add_labeled_bands(bands, ax, x_offset)\n _format_output(ax, title)\n save_path = f\"{output_dir}/{title.replace(' ', '_')}.png\"\n plt.savefig(save_path, bbox_inches=\"tight\")\n logger.info(\"%s plot saved to %s\", title, save_path)",
"def plotDistribuMap(df, columns, kind = 'bar'):",
"def bar_graph(self, dataset):\n return self._plot_standard('bar', dataset)",
"def a_picture_city(self):\r\n result1 = []\r\n year = ['2000','2005','2010','2013','2014','2015','2016','2017','2018']\r\n for i in range(1,10):\r\n get_data = int(self.data[self.k][i])\r\n result1.append(get_data)\r\n df = pd.DataFrame({'The Number of Industrial Enterprises': result1}, index=year)\r\n ax = df.plot.bar(rot=0)\r\n ax.set_title('{}'.format(self.data[self.k][0]))",
"def make_barchart2(data, typesplot):\n config = define_config()\n mystyle = define_style()\n labels = list(data.index)\n values = list(data.loc[:, \"counts\"])\n plot = pygal.HorizontalBar(config, style=mystyle)\n plot.title = \"Frequency of Semantically Open Edits\"\n plot.x_labels = labels\n plot.add(\"types\", [\n {\"value\": values[0], \"color\": \"DarkSlateGray\", \"label\": labels[0]},\n {\"value\": values[1], \"color\": \"DarkSlateGray\", \"label\": labels[1]},\n {\"value\": values[2], \"color\": \"DarkSlateGray\", \"label\": labels[2]},\n {\"value\": values[3], \"color\": \"DarkSlateGray\", \"label\": labels[3]},\n {\"value\": values[4], \"color\": \"DarkSlateGray\", \"label\": labels[4]},\n {\"value\": values[5], \"color\": \"DarkSlateGray\", \"label\": labels[5]},\n {\"value\": values[6], \"color\": \"DarkSlateGray\", \"label\": labels[6]},\n ], stroke_style={\"width\": 0})\n plot.render_to_file(typesplot)\n print(\"Looking good: semantically open edits visualization has been saved.\")",
"def plotBars(a):\n n = len(a)\n stddraw.setXscale(-1, n)\n for i in range(n):\n stddraw.filledRectangle(i-0.25, 0.0, 0.5, a[i])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Screen scr: the screen object; num x, y, w1, h1, w2, h2: the loc and sizes of the two buttons; tup color1, color2: the colors of the two buttons; func resp: the response to the click
|
def __init__(self, scr, x, y, w, h, color1, color2, size_factor=1, resp=lambda: None, delay_time=0.3):
if size_factor < 1:
raise InvalidBorderButtonError(w, h, w * size_factor, h * size_factor)
self.rect = Rect(scr, color1, x, y, w, h)
self.button = Button(scr, x + ((w - w * size_factor) / 2), y + ((h - h * size_factor) / 2), w * size_factor,
h * size_factor, color2, resp, delay_time)
return
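
# Minimal usage sketch (hedged): the enclosing class name is not shown in this record,
# and Rect, Button, and InvalidBorderButtonError are assumed to be defined in this module.
#   widget = SomeBorderedButton(scr, 10, 10, 120, 40,
#                               (0, 0, 0), (200, 200, 200),
#                               resp=lambda: print('clicked'))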
|
[
"def button1(msg,x,y,w,h,ic,ac,action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w>mouse[0]>x and y+h>mouse[1]>y:\n pygame.draw.rect(screen,ac,(x,y,w,h))\n if click[0]==1 and action!=None:\n action()\n else:\n pygame.draw.rect(screen,ic,(x,y,w,h))\n #Displays message on button\n small_text = pygame.font.SysFont(\"timesnewromanboldttf\",25)\n text_s = small_text.render(msg,True, (85,107,47))\n text_rect_s = text_s.get_rect()\n text_rect_s.center = (x+(w//2),y+(h//2))\n screen.blit(text_s, text_rect_s)",
"def create_buttons(self):\r\n pos1 = [self.pos[0] + self.width + 10, self.pos[1] + self.height//2]\r\n pos2 = [self.pos[0]+self.width//2, self.pos[1]+self.height+10]\r\n pos3 = [self.pos[0], self.pos[1] + self.height]\r\n b1 = Button(self.game, pos1, 0, 0,\r\n 'speed : ', self.speed, value_min=0.05, value_max=0.2, step=0.01)\r\n b2 = Button(self.game, pos2, 0, 0,\r\n 'speed sprint: ', self.speed_sprint, value_min=0.1, value_max=0.5, step=0.01)\r\n b3 = Button(self.game, pos3, 0, 0, 'jump height', round(self.v0_max/v0_max,2), value_min=0.5, value_max=1.5, step=0.1)\r\n return [b1, b2, b3]",
"def test_button(self):\n # generic button\n button = Button(10, 20, 50, 60)\n self.assertEqual(pg.Rect(-15, 20, 50, 60), button.rect)\n self.assertEqual((10, 20), button.rect.midtop)\n self.assertEqual((255, 255, 255), button.return_color())\n\n # turn button\n turn_button = Button(0, 0, 20, 20, color=(255, 255, 255))\n self.assertEqual((255, 255, 255), turn_button.return_color())\n\n # end button\n end_button = Button(0, 0, 20, 20, color=(0, 0, 0))\n self.assertEqual((0, 0, 0), end_button.return_color())",
"def __init__( self, labels, ycoord, percentage, win ):\n height = percentage * win.getHeight() / 100\n self.buttons = []\n for i in range( len( labels ) ):\n self.buttons.append( Button( win, Point( win.getWidth() / 2,\n height / len( labels ) * ( 1 / 2 + i ) + ycoord ),\n self.fillpercent * win.getWidth() / 100,\n self.fillpercent * height / len( labels ) / 100, labels[i] ) )\n self.buttons[-1].activate()\n return",
"def drawCommandButton(p1, p2, text):\n global win\n cmd1 = Rectangle(p1, p2)\n cmd1Text = Text(Point(0.5 * (p2.getX() + p1.getX()), 0.5 * (p2.getY() + p1.getY())), text)\n cmd1.setFill('blue')\n cmd1.setOutline('black')\n cmd1Text.setTextColor('white')\n cmd1.draw(win)\n cmd1Text.draw(win)",
"def drawBoard(self):\n \n self.button_Font = (\"Arial\", 68, \"bold\")\n self.button_List = []\n \n for boxes in range(9):\n self.button_List.append(tkinter.Button(self.main_window, text = \"\",\n font = self.button_Font, bg = \"black\", fg = \"white\", width = 3, height = 1,\n command = lambda pos = boxes: self.boxPressed(pos)))\n index = 0\n for r in range(3):\n for col in range(3):\n self.button_List[index].grid(row = r, column = col)\n index += 1",
"def _setup_menus(self):\n \n btn = pygame.Surface((200, 50))\n btn.set_alpha(90)\n btn.fill(WHITE)\n #loadscreen buttns\n offset = 70\n self._logotext = self._logofont.render('CrossCrib', True, WHITE)\n self._logotext_rect = self._logotext.get_rect(\n centerx=self._screen_rect.centerx,\n y=150)\n self._ldscrnng = btn.copy()\n self._ldscrnng_rect = self._ldscrnng.get_rect(\n centerx=self._screen_rect.centerx,\n y=290)\n self._ldscrnrsm = btn.copy()\n self._ldscrnrsm_rect = self._ldscrnrsm.get_rect(\n centerx=self._screen_rect.centerx,\n y=self._ldscrnng_rect.y + offset)\n self._ldscrnhowto = btn.copy()\n self._ldscrnhowto_rect = self._ldscrnhowto.get_rect(\n centerx=self._screen_rect.centerx,\n y=self._ldscrnrsm_rect.y + offset)\n self._ldscrnquit = btn.copy()\n self._ldscrnquit_rect = self._ldscrnquit.get_rect(\n centerx=self._screen_rect.centerx,\n y=self._ldscrnhowto_rect.y + offset)\n #in game buttons\n self._continue_button = btn.copy()\n self._continue_rect = self._continue_button.get_rect(left=550,top=610)\n self._gameoverng = btn.copy()\n self._gameoverng_rect = self._continue_button.get_rect(\n centerx=self._screen_rect.centerx,\n y=390)\n self._deal_btn = btn.copy()\n self._deal_rect = self._deal_btn.get_rect(left=550, top=340)\n \n #game menu\n self._menu_open = False\n self._menu = pygame.Surface((136, 150))\n self._menu_rect = self._menu.get_rect(left=750, top=25)\n self._menubtn = pygame.Surface((150,15))\n self._menubtn.set_alpha(0)\n self._menubtn_rect = self._menubtn.get_rect(left=850, top=12)\n btn = pygame.Surface((136,50))\n btn.set_alpha(90)\n btn.fill(WHITE)\n self._mnewgame = btn.copy()\n self._mnewgame_rect = self._mnewgame.get_rect(left=750, top=25)\n self._mngtext = self._font.render('Newgame', True, BLACK)\n self._mngtext_rect = self._mngtext.get_rect(\n centerx=self._mnewgame_rect.centerx,\n centery=self._mnewgame_rect.centery)\n padding = self._mngtext_rect.x - self._mnewgame_rect.x \n self._opt_menu = pygame.Surface((136, 150))\n self._opt_menu_open = False\n self._opt_menu_rect = self._opt_menu.get_rect(left=615, top=75)\n self._opt_autodeal = btn.copy()\n self._autodeal_rect = self._opt_autodeal.get_rect(left=615, top=75)\n self._autodealtext = self._font.render('Autodeal', True, BLACK)\n self._autodealtext_rect = self._autodealtext.get_rect(\n x=self._autodeal_rect.x + padding,\n centery=self._autodeal_rect.centery)\n self._opt_autocut = btn.copy()\n self._autocut_rect = self._opt_autocut.get_rect(left=615, top=125)\n self._autocuttext = self._font.render('Autocut', True, BLACK)\n self._autocuttext_rect = self._autocuttext.get_rect(\n x=self._autocut_rect.x + padding,\n centery=self._autocut_rect.centery)\n self._opt_soundonoff = btn.copy()\n self._soundonoff_rect = self._opt_soundonoff.get_rect(left=615, top=175)\n self._soundtext = self._font.render('Mute', True, BLACK)\n self._soundtext_rect = self._soundtext.get_rect(\n x=self._soundonoff_rect.x + padding,\n centery=self._soundonoff_rect.centery)\n self._moptions = btn.copy()\n self._moptions_rect = self._moptions.get_rect(left=750, top=75)\n self._moptext = self._font.render('Options', True, BLACK)\n self._moptext_rect = self._moptext.get_rect(\n x=self._mngtext_rect.x,\n centery=self._moptions_rect.centery)\n self._mquit = btn.copy()\n self._mquit_rect = self._mquit.get_rect(left=750, top=125)\n self._mqtext = self._font.render('Quit', True, BLACK)\n self._mqtext_rect = self._mqtext.get_rect(\n x=self._mngtext_rect.x,\n centery=self._mquit_rect.centery)\n menu_text = self._smallfont.render('Menu', 
True, BLACK)\n self._background.blit(menu_text, (850,12))",
"def __click_event(self, event):\n\n if self.board is None:\n return\n\n largeur = self.canvas.winfo_width()\n hauteur = self.canvas.winfo_height()\n\n colomne_space = largeur / self.board.width\n ligne_space = hauteur / self.board.height\n\n # on recupaire le position dans la grille\n grid_pos_x = floor(event.x / colomne_space)\n grid_pos_y = floor(event.y / ligne_space)\n try:\n # Si on a fait un click gauche et que on a choisi de placer un joueur\n if self.select_set.get() == 1:\n print(\"player\")\n self.delete_shape_board(self.board.player_pos[0], self.board.player_pos[1])\n self.board.mat[self.board.player_pos[0]][self.board.player_pos[1]] = \\\n Case(Case.VIDE, self.board.recompence[Board.VIDE])\n\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.START)\n self.board.player_pos[0] = grid_pos_y\n self.board.player_pos[1] = grid_pos_x\n self.draw_player(grid_pos_y, grid_pos_x)\n\n # Si on a fait un click gauche et que on a choisi de placer la cible\n elif self.select_set.get() == 2:\n print(\"target\")\n self.delete_shape_board(self.board.target_pos[0], self.board.target_pos[1])\n self.board.mat[self.board.target_pos[0]][self.board.target_pos[1]] = \\\n Case(Case.VIDE, self.board.recompence[Board.VIDE])\n\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.FIN, self.board.recompence[Board.FIN])\n self.board.target_pos[0] = grid_pos_y\n self.board.target_pos[1] = grid_pos_x\n self.draw_target(grid_pos_y, grid_pos_x)\n\n elif self.select_set.get() == 3:\n print(\"Obstacle\")\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.OBSTACLE)\n self.draw_obstacle(grid_pos_y, grid_pos_x)\n\n elif self.select_set.get() == 4:\n print(\"Danger\")\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.DANGER, self.board.recompence[Board.DANGER])\n self.draw_danger(grid_pos_y, grid_pos_x)\n except IndexError:\n print(\"Error index\")",
"def _create_buttons(self, share_button, move_buttons, jump_button, \n top_label):\n if top_label:\n self.top_label = Gtk.Label(label=top_label)\n self.top_label.set_use_markup(True)\n self.track_ref_for_deletion(\"top_label\")\n\n self.add_btn = SimpleButton(Gtk.STOCK_ADD, self.add_button_clicked)\n self.edit_btn = SimpleButton(Gtk.STOCK_EDIT, self.edit_button_clicked)\n self.del_btn = SimpleButton(Gtk.STOCK_REMOVE, self.del_button_clicked)\n self.track_ref_for_deletion(\"add_btn\")\n self.track_ref_for_deletion(\"edit_btn\")\n self.track_ref_for_deletion(\"del_btn\")\n\n self.add_btn.set_tooltip_text(self._MSG['add'])\n self.edit_btn.set_tooltip_text(self._MSG['edit'])\n self.del_btn.set_tooltip_text(self._MSG['del'])\n \n if share_button:\n self.share_btn = SimpleButton(Gtk.STOCK_INDEX, self.share_button_clicked)\n self.share_btn.set_tooltip_text(self._MSG['share'])\n self.track_ref_for_deletion(\"share_btn\")\n else:\n self.share_btn = None\n \n if move_buttons:\n self.up_btn = SimpleButton(Gtk.STOCK_GO_UP, self.up_button_clicked)\n self.up_btn.set_tooltip_text(self._MSG['up'])\n self.down_btn = SimpleButton(Gtk.STOCK_GO_DOWN, \n self.down_button_clicked)\n self.down_btn.set_tooltip_text(self._MSG['down'])\n self.track_ref_for_deletion(\"up_btn\")\n self.track_ref_for_deletion(\"down_btn\")\n else:\n self.up_btn = None\n self.down_btn = None\n\n if jump_button:\n self.jump_btn = SimpleButton(Gtk.STOCK_JUMP_TO, self.jump_button_clicked)\n self.track_ref_for_deletion(\"jump_btn\")\n self.jump_btn.set_tooltip_text(self._MSG['jump'])\n else:\n self.jump_btn = None\n\n hbox = Gtk.HBox()\n hbox.set_spacing(6)\n if top_label:\n hbox.pack_start(self.top_label, False, True, 0)\n hbox.pack_start(self.add_btn, False, True, 0)\n if share_button:\n hbox.pack_start(self.share_btn, False, True, 0)\n hbox.pack_start(self.edit_btn, False, True, 0)\n hbox.pack_start(self.del_btn, False, True, 0)\n if move_buttons:\n hbox.pack_start(self.up_btn, False, True, 0)\n hbox.pack_start(self.down_btn, False, True, 0)\n\n if self.jump_btn:\n hbox.pack_start(self.jump_btn, False, True, 0)\n hbox.show_all()\n self.pack_start(hbox, False, True, 0)\n\n if self.dbstate.db.readonly:\n self.add_btn.set_sensitive(False)\n self.del_btn.set_sensitive(False)\n if share_button:\n self.share_btn.set_sensitive(False)\n if jump_button and self.jump_btn:\n self.jump_btn.set_sensitive(False)\n if move_buttons:\n self.up_btn.set_sensitive(False)\n self.down_btn.set_sensitive(False)",
"def new_win(color1,color2):\n my_font = pygame.font.SysFont(\"comicsansms\",90)\n text = my_font.render(\"GAME OVER\",True, (85,107,47))\n text_rect = text.get_rect()\n text_rect.center = (300,160)\n screen.blit(text, text_rect)\n dark_green = (85,107,47)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n button1(\"Try Again\",90,260,115,27,color2,color1,reset_game)\n button1(\"Quit\",435,260,65,27,color2,color1,quitgame)\n game_wall()\n pygame.display.update()",
"def screen(*args, **kwargs):\n actual_screen(*args, **kwargs)",
"def top_frame_contents(self):\r\n self.closeButton = tk.Button(self.top_frame, text='Close', fg=\"black\",\r\n bg=\"gray90\", font=self.label_font, bd=2,\r\n highlightthickness=0,\r\n command=self.close_robot)\r\n self.closeButton.pack(padx=15, side=\"right\")\r\n\r\n self.manual_button = tk.Button(self.top_frame, text=\"Manual\",\r\n fg=\"black\",\r\n bg=\"gray90\", font=self.label_font, bd=2,\r\n highlightthickness=0,\r\n command=self.manual_button)\r\n self.manual_button.pack(padx=15, side=\"right\")\r\n\r\n self.about_button = tk.Button(self.top_frame, text=\"About Us\",\r\n fg=\"black\",\r\n bg=\"gray90\", font=self.label_font, bd=2,\r\n highlightthickness=0,\r\n command=self.about_button)\r\n self.about_button.pack(padx=15, side=\"right\")\r\n\r\n self.param_button = tk.Button(self.top_frame, text=\"Parameters\",\r\n fg=\"black\",\r\n bg=\"gray90\", font=self.label_font, bd=2,\r\n highlightthickness=0,\r\n command=self.parameterButton)\r\n self.param_button.pack(padx=15, side=\"right\")\r\n\r\n self.operate_button = tk.Button(self.top_frame, text=\"Operation\",\r\n fg=\"black\",\r\n bg=\"gray90\", font=self.label_font,\r\n bd=2,\r\n highlightthickness=0,\r\n command=self.operateButton)\r\n self.operate_button.pack(padx=15, side=\"right\")",
"def draw(self, screen: pygame.Surface) -> bool:\n action = False # button is not clicked by default\n\n # get mouse position\n pos = pygame.mouse.get_pos()\n\n # check mouseover and clicked conditions\n if self.rect.collidepoint(pos): # if image collides with mouse\n # if mouse is pressed and has not been pressed before\n if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n action = True # button is clicked\n self.clicked = True # button is clicked\n\n if pygame.mouse.get_pressed()[0] == 0: # if not pressed\n self.clicked = False # button is not pressed\n\n # draw button\n screen.blit(self.image, (self.rect.x, self.rect.y))\n return action",
"def getNextSizeCoords(self):\n #get button width and height\n if self.cols:\n width = int(self.width/(self.numOfButtons+1))\n height = self.height\n else:\n width = int(self.width/cols)\n height = int((self.numOfButtons+1) % self.cols)\n\n #get button initial coordinates\n if self.cols:\n xPos = int(self.width/(self.numOfButtons+1))*len(self.buttons)\n yPos = self.startCoords[1]\n return (width,height),(xPos,yPos)",
"def __init__(self,screen,start,col=(0,0,0)):\n self.start = start\n self.col = col\n self.oldrect = start[0],start[1],1,1\n tmp = screen.get_at((start[0],start[1]))[:3]\n self.screen_backup = [[tmp],[tmp],[tmp],[tmp]]",
"def mybut(text, dummy, xl, yb, xw=0, yh=0, axisbg=None, color=0.85, fun=None, bspace=0.005):\n if axisbg is None: axisbg='lightgoldenrodyellow'\n\n global button_layout_cursor\n if xw==0: xw=0.015*(len(text)+1)\n if yh==0: yh=0.05\n## thisax=fig.add_axes([xl, yb, xw, yh], axisbg=axisbg) fundamentally wrong\n thisax=pl.axes([xl, yb, xw, yh], axisbg=axisbg)\n thisbut=Button(thisax, text)\n thisbut.on_clicked(fun)\n button_layout_cursor += xw+bspace\n return(thisbut)",
"def draw(self, win):\r\n # draw menu\r\n win.blit(self.image, self.rect)\r\n\r\n # draw button\r\n # (Q2) Draw buttons here\r\n for but in self.__buttons:\r\n win.blit(but.image, but.rect)",
"def create_buttons(self):\n self.create_button(\"ADD\", self.add_contact)\n self.create_button(\"EDIT\", self.edit, y=260)\n self.create_button(\"DELETE\", self.delete, y=210)\n self.create_button(\"VIEW\", self.view, y=160)\n self.create_button(\"EXIT\", self.exit_book, bg='tomato', x=300, y=320)\n self.create_button(\"RESET\", self.reset, y=310)",
"def create_all_buttons(self):\n\n # Create the play button\n button_y = (self.aliens[3].rect.bottom + self.settings.screen_padding)\n self.play_button = Button(self.screen, \"Play\", self.subtitle_color, self.bg_color,\n self.screen_rect.centerx, button_y)\n\n # Create the highscore button\n button_y = self.play_button.rect.bottom + self.settings.screen_padding\n self.highscore_button = Button(self.screen, \"Highscores\", self.orange_color,\n self.bg_color, self.screen_rect.centerx, button_y)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Capitalize all named entities found in the given list of lines.
|
def capitalize_entities(lines):
ner_list = ["PERSON", "NORP", "FACILITY", "ORG", "GPE", "LOC", "PRODUCT",
"EVENT", "WORK_OF_ART", "LAW", "LANGUAGE"]
pos_list = ["ADJ", "ADV", "NOUN", "PROPN", "VERB"]
nlp = spacy.load("en")
doc = nlp(" ".join(lines))
update_dict = {}
for ent in doc.ents:
if ent.label_ in ner_list:
tokens = []
combine = False
for token in ent:
text = token.text
if token.pos_ in ["PART", "PUNCT", "SYM"]:
fmt_str = r"(\s*){0}(\s*)"
                    # escape the token so regex metacharacters ('.', '+', etc.) match literally
                    match = re.search(fmt_str.format(re.escape(text)), ent.text)
if match.group(1) == "":
if len(tokens) == 0:
tokens.append(text)
else:
tokens[-1] = tokens[-1] + text
if match.group(2) == "":
combine = True
else:
if token.pos_ in pos_list and not "A" <= text[0] <= "Z":
text = text.capitalize()
if combine:
tokens[-1] = tokens[-1] + text
else:
tokens.append(text)
combine = False
capitalized = " ".join(tokens)
if ent.text != capitalized:
update_dict[ent.text] = capitalized
updated_lines = []
for line in lines:
for old, new in update_dict.items():
if old in line:
line = line.replace(old, new)
updated_lines.append(line)
return updated_lines
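
# Minimal usage sketch (hedged): requires `import re`, `import spacy`, and an English
# spaCy model loadable via spacy.load("en"); the exact output depends on the model.
#   capitalize_entities(["the united nations met in new york city."])
#   # -> e.g. ["the United Nations met in New York City."]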
|
[
"def replace_with_uppercase(string, names, precompiled):\n for name in names:\n for result in precompiled[name].findall(string):\n string = string.replace(result, name)\n return string",
"def uncapitalize(s, preserveAcronymns='False'):\n\n pass",
"def capitalize(s):\n\n pass",
"def fix_title_capitalization(title):\n if re.search(\"[A-Z]\", title) and re.search(\"[a-z]\", title):\n return title\n word_list = re.split(' +', title)\n final = [word_list[0].capitalize()]\n for word in word_list[1:]:\n if word.upper() in COMMON_ACRONYMS:\n final.append(word.upper())\n elif len(word) > 3:\n final.append(word.capitalize())\n else:\n final.append(word.lower())\n return \" \".join(final)",
"def uncapitalize_name(name):\n buf = []\n for c in name:\n if 'A' <= c <= 'Z' and len(buf):\n buf.append('-')\n buf.append(c)\n return ''.join(buf).lower()",
"def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)",
"def __fix_annotation(self, value):\n tokens = value.split()\n for i in range(0, len(tokens)):\n if not tokens[i].isupper() and not tokens[i] in self.__fix_lower:\n tokens[i] = tokens[i].capitalize()\n return ''.join(tokens)",
"def make_name(*words):\n import itertools\n\n words = itertools.chain.from_iterable(w.split() for w in words)\n\n return ''.join(w.lower().capitalize() for w in words)",
"def merge_acronyms(self,s):\r\n r = re.compile(r'(?:(?<=\\.|\\s)[A-Z]\\.)+')\r\n acronyms = r.findall(s)\r\n for a in acronyms:\r\n s = s.replace(a, a.replace('.', ''))\r\n return s",
"def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()",
"def RewriteName(entry, title, translation_table):\n if title not in entry:\n return\n\n if (\n entry[title].upper() == entry[title]\n and entry[title] not in translation_table\n and entry[title] not in _ignored_probable_acronyms\n ):\n _ignored_probable_acronyms.add(entry[title])\n print(\n f\"Ignoring probable acryonym {len(_ignored_probable_acronyms)}: \"\n f'\"{entry[title]}\"',\n file=sys.stderr,\n )\n return\n\n entry[title] = translation_table.get(entry[title], entry[title])",
"def first_upper_case(words):\n return [w.capitalize() for w in words]",
"def upper(list_of_strings: Sequence[str]) -> Sequence[str]:\n return [item.upper() for item in list_of_strings]",
"def abbreviated_capwords(self, snake_case: str):\n abbreviated = self.abbreviations.sub(self.abbreviate, snake_case)\n return SNAKE_CASE.sub(self.capitalize, abbreviated)",
"def snake2camel(name):\n return re.sub(r'(?:^|_)([a-z])', lambda x: x.group(1).upper(), name)",
"def insert_civ_names(input_lines, all_names):\n out = []\n for line in input_lines:\n new_line = []\n split_line = line.split(' ')\n start_word_num = 0\n word_num = 0\n while word_num < len(split_line):\n word=split_line[word_num]\n if(word[-1] in '.,:;?!+-='):\n punct = word[-1]\n word = word[:-1]\n else:\n punct = ''\n w = 0\n leader = []\n if(word in all_names and word != 'I'):\n while(word in all_names):\n leader.append(word)\n w += 1\n word = split_line[word_num + w]\n civ = find_best_leader_match(leader)\n if civ is not False:\n new_line.extend(\n (' '.join(split_line[start_word_num:word_num]),\n ' {} ({}){} '.format(' '.join(leader), civ, punct)))\n start_word_num = word_num + len(leader)\n word_num = word_num + len(leader)\n else:\n word_num += 1\n else:\n word_num += 1\n new_line.append(' '.join(split_line[start_word_num:]))\n out.append(''.join(new_line))\n return(''.join(out))",
"def bill_types_to_acronyms(lines):\n update_dict = {}\n update_dict['assembly bill'] = 'ab'\n update_dict['assembly bill number'] = 'ab'\n update_dict['senate bill'] = 'sb'\n update_dict['senate bill number'] = 'sb'\n update_dict['house resolution'] = 'hr'\n update_dict['house resolution number'] = 'hr'\n #TODO\n \n updated_lines = []\n for line in lines:\n for old, new in update_dict.items():\n if old in line:\n line = line.replace(old, new)\n updated_lines.append(line)\n return updated_lines",
"def inner(w):\r\n return w.capitalize()",
"def capitalise_all(self, i: list):\n selfcopy = self.copy()\n for position in i:\n selfcopy.capitalise(position)\n return selfcopy"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert all bill types into their acronym form (e.g. "assembly bill" > "ab")
|
def bill_types_to_acronyms(lines):
    # Longer phrases come first so that e.g. 'assembly bill number' is replaced
    # before its shorter prefix 'assembly bill' (dicts preserve insertion order).
    update_dict = {}
    update_dict['assembly bill number'] = 'ab'
    update_dict['assembly bill'] = 'ab'
    update_dict['senate bill number'] = 'sb'
    update_dict['senate bill'] = 'sb'
    update_dict['house resolution number'] = 'hr'
    update_dict['house resolution'] = 'hr'
    #TODO
    updated_lines = []
    for line in lines:
        for old, new in update_dict.items():
            if old in line:
                line = line.replace(old, new)
        updated_lines.append(line)
    return updated_lines
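
# Minimal usage sketch (hypothetical input lines):
#   bill_types_to_acronyms(["please see assembly bill number 32 and senate bill 7"])
#   # -> ["please see ab 32 and sb 7"]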
|
[
"def acronym(input):\n words = input.split()\n res = ''\n for word in words:\n res = res + word[0].upper()\n return res",
"def applyAcronymToMsType (msType_phrase):\n mstype_dict = {\n 'Research Article':'Res',\n 'Short Report':'SR',\n 'Review':'Rw',\n 'Commentary':'Com',\n 'Viewpoint':'VP',\n 'Editorial':'Editorial material - Editorial',\n 'Letter to the Editor':'Editorial material - Letter to editor',\n 'Debate':'Editorial Material - Debate',\n 'Corrigendum':'Editorial material - Corrigendum'\n }\n\n if msType_phrase in mstype_dict:\n short_phrase = mstype_dict[msType_phrase]\n else:\n short_phrase = msType_phrase\n\n return short_phrase",
"def acronym(phrase):\n phraseList = phrase.split()\n answer = \"\"\n for word in phraseList:\n x = word[0]\n answer = answer + x\n print(answer)",
"def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()",
"def bill_to_code(bill_type, sub_type):\n if bill_type == 'AGFS_FEE' and sub_type in BASIC_FEES_MAP.values():\n return 'BABAF'\n return REVERSE_FEES_MAP[(bill_type, sub_type)]",
"def class_abbrev(type):\n ...",
"def abbreviate(phrase: str) -> str:\n words: Pattern[str] = re.compile(r\"[A-Za-z']+\")\n word_list: List[str] = words.findall(phrase)\n acronym = ''.join(word[0] for word in word_list).upper()\n return acronym",
"def reducedPublication (\n\n self,\n text = None\n ) :\n\n text = self.separatorsToSpaces( text )\n\n if len( text ) == 0 : return \"\"\n\n result = \"\"\n\n # this is an acronym\n\n if ( text.isupper() ) and ( text.isalpha() ) and ( not \" \" in text ) :\n\n for character in text :\n\n result = result + character.lower() + \" \"\n\n\n # normal\n\n else :\n\n for word in utilities.textToWords( text ) :\n\n if word == \"\" : continue\n\n if ( not word.isupper() ) and ( word in self.omissionList ) : continue\n\n # removes accents\n\n result = result + utilities.normalized( word ).lower() + \" \"\n\n return result.strip()",
"def standardize_class_name(base, tablename, table):\n\n return _inflector.singular_noun(tablename[0].upper() +\n re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))",
"def standardise_names(self, domains):\n \n # convert the list of alternative domain names into a single identifier\n for pos in range(len(domains)):\n names = list(domains[pos][\"domain_type\"])\n \n names = [ x.replace(\" domain\", \"\") for x in names ]\n name_lengths = [ len(x) for x in names ]\n \n # find the shortest name (note, this won't necessarily be the best\n # name, but it's a start).\n name_pos = name_lengths.index(min(name_lengths))\n \n domains[pos][\"domain_type\"] = names[name_pos]\n \n return domains",
"def abbreviate(**kwargs):\n result = abbreviate_journal_name(kwargs.get(\"abbreviate\"))\n click.echo(result)",
"def _handle_abbreviations(s):\n # Format: abbrev = \"meaning\" gender (kk|kvk|hk)\n a = s.split('=', maxsplit=1)\n abbrev = a[0].strip()\n m = a[1].strip().split('\\\"')\n par = \"\"\n if len(m) >= 3:\n # Something follows the last quote\n par = m[-1].strip()\n gender = \"hk\" # Default gender is neutral\n fl = None # Default word category is None\n if par:\n p = par.split()\n if len(p) >= 1:\n gender = p[0].strip()\n if len(p) >= 2:\n fl = p[1].strip()\n Abbreviations.add(abbrev, m[1], gender, fl)",
"def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)",
"def test_build_abbreviation(self):\n\n sub_agency_name = \"Administrative Conference of the United States\"\n self.assertEqual(\"ACUS\", build_abbreviation(sub_agency_name))\n\n sub_agency_name = \"U.S. Customs & Border Protection\"\n self.assertEqual(\"USCBP\", build_abbreviation(sub_agency_name))",
"def underToAllCaps(value):\n return ' '.join(map(lambda x: x.title(), value.split('_')))",
"def export_asset_types(asset_types: Sequence) -> str:\n friendly_values = []\n for asset_type in asset_types:\n value = AssetTypes[asset_type].label if hasattr(AssetTypes, asset_type) else asset_type\n friendly_values.append(value)\n return ', '.join(friendly_values)",
"def __fix_taxon_names(self) -> None:\n\n def taxon_fixer(taxon):\n if taxon is not None and pd.notna(taxon):\n tmp_taxon_trimmed = taxon.lower().strip()\n if len(tmp_taxon_trimmed) > 0:\n if tmp_taxon_trimmed[0] == \"[\":\n tmp_taxon_trimmed = tmp_taxon_trimmed[1:]\n if tmp_taxon_trimmed[-1] == \"]\":\n tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]\n return tmp_taxon_trimmed.capitalize()\n else:\n return None\n else:\n return None\n\n self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[\n :, VALID_RANKS\n ].applymap(taxon_fixer)",
"def convert_attribute(aim_attribute, to_aim=True):\n if to_aim:\n # Camel to _ (APIC to AIM)\n result = []\n for x in aim_attribute:\n if x.isupper():\n result.append('_')\n result.append(x.lower())\n return ''.join(result)\n else:\n # _ to Camel (AIM to APIC)\n parts = aim_attribute.split('_')\n result = parts[0]\n for part in parts[1:]:\n result += part[0].upper() + part[1:]\n return result",
"def lookup_abbreviation(w):\r\n # Remove brackets, if any, before lookup\r\n clean_w = w[1:-1] if w[0] == '[' else w\r\n # Return a single-entity list with one meaning\r\n m = Abbreviations.DICT.get(clean_w, None)\r\n return None if m is None else [ BIN_Meaning._make(m) ]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets all futures for this delegate. These can be used to handle any pending futures when a peripheral is disconnected.
|
def futures(self) -> Iterable[asyncio.Future]:
services_discovered_future = (
(self._services_discovered_future,)
if hasattr(self, "_services_discovered_future")
else ()
)
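    # Combine the one-off service-discovery future (if present) with every
    # pending per-operation future held in the delegate's dictionaries.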
return itertools.chain(
services_discovered_future,
self._service_characteristic_discovered_futures.values(),
self._characteristic_descriptor_discover_futures.values(),
self._characteristic_read_futures.values(),
self._characteristic_write_futures.values(),
self._descriptor_read_futures.values(),
self._descriptor_write_futures.values(),
self._characteristic_notify_change_futures.values(),
self._read_rssi_futures.values(),
)
|
[
"def async_all_discovered_devices(self) -> Iterable[BLEDevice]:\n return itertools.chain.from_iterable(\n scanner.discovered_devices for scanner in self._scanners\n )",
"async def get_all(self) -> List[T]:\n all_items = [await self.get()]\n try:\n while True:\n all_items.append(self.get_nowait())\n except asyncio.queues.QueueEmpty:\n pass\n return all_items",
"async def get_all_if_any(self) -> List[T]:\n all_items = []\n try:\n while True:\n all_items.append(self.get_nowait())\n except asyncio.queues.QueueEmpty:\n pass\n return all_items",
"def waitForAllConnectionsToClose(self):\n if not self._connections:\n return self._stop()\n return self._allConnectionsClosed.deferred().addBoth(self._stop)",
"def main(self) -> list:\r\n\r\n for thread in range(self.threads):\r\n t = threading.Thread(target=self.threader)\r\n t.daemon = True\r\n t.start()\r\n\r\n for curr in self.hosts:\r\n self.q.put(curr)\r\n\r\n self.q.join()\r\n\r\n return self.res",
"def done(self):\n yield DeferredList(self.observers)\n yield DeferredList(list(self.tasks))",
"def get_delegates(self):\n candidate_elections = CandidateElection.objects.filter(\n candidate=self,\n )\n\n delegates = None\n for ce in candidate_elections:\n delegates = delegates | ce.delegates.all()\n\n return delegates",
"async def _async_get_discoveries(self) -> list[ssdp.SsdpServiceInfo]:\n LOGGER.debug(\"_get_discoveries\")\n\n # Get all compatible devices from ssdp's cache\n discoveries: list[ssdp.SsdpServiceInfo] = []\n for udn_st in DmrDevice.DEVICE_TYPES:\n st_discoveries = await ssdp.async_get_discovery_info_by_st(\n self.hass, udn_st\n )\n discoveries.extend(st_discoveries)\n\n # Filter out devices already configured\n current_unique_ids = {\n entry.unique_id\n for entry in self._async_current_entries(include_ignore=False)\n }\n discoveries = [\n disc for disc in discoveries if disc.ssdp_udn not in current_unique_ids\n ]\n\n return discoveries",
"def get_all(cls):\r\n\r\n from . import server\r\n\r\n tasks = []\r\n servers = server.Server.find(enabled = True)\r\n for _server in servers:\r\n timeout = _server.val(\"timeout\", DEFAULT_TIMEOUT)\r\n task = Task(_server, timeout)\r\n tasks.append(task)\r\n\r\n return tasks + list(TASKS)",
"def futures_get_all_symbols(self):\n exchange = self.client.futures_exchange_info()['symbols']\n symbol_list = list()\n for dictionary in exchange:\n symbol_list.append(dictionary['symbol'] + \"PERP\")\n return symbol_list",
"def getList(self):\n return self._queue",
"def _match_results_to_futures(self):\n deserialize = self.funcx_executor.funcx_client.fx_serializer.deserialize\n with self._new_futures_lock:\n futures_to_complete = [\n self._open_futures.pop(tid)\n for tid in self._open_futures.keys() & self._received_results.keys()\n ]\n if not self._open_futures:\n self._open_futures_empty.set()\n\n for fut in futures_to_complete:\n props, res = self._received_results.pop(fut.task_id)\n\n if res.is_error:\n fut.set_exception(\n TaskExecutionFailed(res.data, str(props.timestamp or 0))\n )\n else:\n try:\n fut.set_result(deserialize(res.data))\n except InvalidStateError as err:\n log.error(\n f\"Unable to set future state ({err}) for task: {fut.task_id}\"\n )\n except Exception as exc:\n task_exc = Exception(\n f\"Malformed or unexpected data structure. Data: {res.data}\",\n )\n task_exc.__cause__ = exc\n fut.set_exception(task_exc)",
"def _get_future_devices(self, context):\n monitor = Monitor.from_netlink(context)\n monitor.filter_by(\"hidraw\")\n monitor.start()\n\n self._scanning_log_message()\n for device in iter(monitor.poll, None):\n if device.action == \"add\":\n # Sometimes udev rules has not been applied at this point,\n # causing permission denied error if we are running in user\n # mode. With this sleep this will hopefully not happen.\n sleep(1)\n\n yield device\n self._scanning_log_message()",
"async def futures_get_all_orders(self, **params):\r\n return await self.client_helper(\"futures_get_all_orders\", **params)",
"def _async_forecast_daily(self) -> list[Forecast] | None:\n return self._forecast(False)",
"def GetAllTransports(self):\n return self.native.get_all_transports()",
"async def get_services(self, **kwargs) -> BleakGATTServiceCollection:\n await self._services_resolved.wait()\n return self.services",
"async def get_servers(self) -> list:\n\t\tquery = \"SELECT id, name FROM servers\"\n\t\tself._logger.debug(\"Getting all servers\")\n\n\t\tasync with self.pool.acquire() as conn:\n\t\t\tasync with conn.transaction():\n\t\t\t\tres = await conn.fetchval(query, server_id)\n\n\t\t\t\treturn res",
"def _get_finished_functions(self):\n\n finished_func_addrs = []\n for func_addr, all_jobs in self._jobs_to_analyze_per_function.items():\n if not all_jobs:\n # great! we have finished analyzing this function!\n finished_func_addrs.append(func_addr)\n\n return finished_func_addrs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Transaction service sync message receive txs data
|
def msg_tx_service_sync_txs(self, msg: TxServiceSyncTxsMessage) -> None:
network_num = msg.network_num()
self.node.last_sync_message_received_by_network[network_num] = time.time()
tx_service = self.node.get_tx_service(network_num)
result_items = tx_service.process_tx_sync_message(msg)
sync_metrics = self.node.sync_metrics[network_num]
sync_metrics["msgs"] += 1
for item in result_items:
sync_metrics["tx_count"] += 1
if item.content_length > 0:
sync_metrics["tx_content_count"] += 1
for short_id, transaction_flag in zip(
item.short_ids, item.transaction_flag_types
):
self.node.sync_short_id_buckets[network_num].incr_short_id(short_id)
if TransactionFlag.PAID_TX in transaction_flag:
tx_service.set_short_id_transaction_type(short_id, transaction_flag)
self.conn.log_debug(
"TxSync processed msg from {} network {}, msgs: {}, transactions: {}, content: {}",
self.conn,
network_num,
sync_metrics["msgs"],
sync_metrics["tx_count"],
sync_metrics["tx_content_count"],
)
|
[
"def conduct_transaction(self,trans,o):\n pass",
"def transaction_command():\n pass",
"def sendtx(cmd):\n txData = cmd.split(\"sendtx \")[-1]\n if \"{\" in txData:\n txData = json.loads(txData)\n print(\"Sending transaction...\")\n coin.addTx(txData)",
"def svn_txdelta_send_txstream(*args) -> \"svn_error_t *\":\n return _delta.svn_txdelta_send_txstream(*args)",
"def on_tx_event(self, data):\n # data = array.array('B', [1, 255, 133, 128, 8, 0, 128, 0])\n page_to_send = self._get_next_page()\n data_payload = page_to_send.to_payload() # get new data payload to sent at this TX event.\n # call channel's send_broadcast_data to set the TX buffer to new data.\n self.channel.send_broadcast_data(data_payload)\n print(\"send TX\")",
"def handle_message(self, message):\n payload = str(message.payload.message)\n print(\"received transaction payload: \" + payload)\n\n segment = TxTennaSegment.deserialize_from_json(payload)\n self.segment_storage.put(segment)\n network = self.segment_storage.get_network(segment.payload_id)\n\n ## process incoming transaction confirmation from another server\n if (segment.block > 0):\n print(\"\\nTransaction \" + segment.payload_id + \" confirmed in block \" + str(segment.block))\n elif (segment.block is 0):\n print(\"\\nTransaction \" + segment.payload_id + \" added to the the mem pool\")\n elif (network is 'd'):\n ## process message data\n if (self.segment_storage.is_complete(segment.payload_id)):\n filename = self.segment_storage.get_transaction_id(segment.payload_id)\n t = Thread(target=self.receive_message_from_gateway, args=(filename,))\n t.start()\n else:\n ## process incoming tx segment\n if not self.local :\n headers = {u'content-type': u'application/json'}\n url = \"https://api.samouraiwallet.com/v2/txtenna/segments\" ## default txtenna-server\n r = requests.post(url, headers= headers, data=payload)\n print(r.text)\n\n if (self.segment_storage.is_complete(segment.payload_id)):\n sender_gid = message.sender.gid_val\n tx_id = self.segment_storage.get_transaction_id(segment.payload_id)\n\n ## check for confirmed transaction in a new thread\n if (self.local) :\n t = Thread(target=self.confirm_bitcoin_tx_local, args=(tx_id, sender_gid))\n else :\n t = Thread(target=self.confirm_bitcoin_tx_online, args=(tx_id, sender_gid, network))\n t.start()",
"def _transaction_start(self):\n self._command = []\n self._expected = 0",
"def sync_to_salesforce():",
"def transaction_sent(self, value):\n pass #value = 1 if txn was written, 0 if it already existed",
"def _transaction_end(self):\n # Ask to return response bytes immediately.\n self._command.append('\\x87')\n # Send the entire command to the MPSSE.\n self._data._write(''.join(self._command))\n # Read response bytes and return them.\n return bytearray(self._data._poll_read(self._expected))",
"def confirm_bitcoin_tx_local(self, hash, sender_gid): \n\n ## send transaction to local bitcond\n segments = self.segment_storage.get_by_transaction_id(hash)\n raw_tx = self.segment_storage.get_raw_tx(segments)\n\n ## pass hex string converted to bytes\n try :\n proxy1 = bitcoin.rpc.Proxy()\n raw_tx_bytes = x(raw_tx)\n tx = CMutableTransaction.stream_deserialize(BytesIO(raw_tx_bytes))\n r1 = proxy1.sendrawtransaction(tx)\n except :\n print(\"Invalid Transaction! Could not send to network.\")\n return\n\n ## try for 30 minutes to confirm the transaction\n for n in range(0, 30) :\n try :\n proxy2 = bitcoin.rpc.Proxy()\n r2 = proxy2.getrawtransaction(r1, True)\n\n ## send zero-conf message back to tx sender\n confirmations = r2.get('confirmations', 0)\n rObj = TxTennaSegment('', '', tx_hash=hash, block=confirmations)\n arg = str(sender_gid) + ' ' + rObj.serialize_to_json()\n self.do_send_private(arg)\n\n print(\"\\nSent to GID: \" + str(sender_gid) + \": Transaction \" + hash + \" added to the mempool.\")\n break \n except IndexError:\n ## tx_id not yet in the global mempool, sleep for a minute and then try again\n sleep(60)\n continue \n \n ## wait for atleast one confirmation\n for m in range(0, 30):\n sleep(60) # sleep for a minute\n try :\n proxy3= bitcoin.rpc.Proxy()\n r3 = proxy3.getrawtransaction(r1, True)\n confirmations = r3.get('confirmations', 0)\n ## keep waiting until 1 or more confirmations\n if confirmations > 0:\n break\n except :\n ## unknown RPC error, but keep trying\n traceback.print_exc()\n\n if confirmations > 0 :\n ## send confirmations message back to tx sender if confirmations > 0\n rObj = TxTennaSegment('', '', tx_hash=hash, block=confirmations)\n arg = str(sender_gid) + ' ' + rObj.serialize_to_json()\n self.do_send_private(arg)\n print(\"\\nSent to GID: \" + str(sender_gid) + \", Transaction \" + hash + \" confirmed in \" + str(confirmations) + \" blocks.\")\n else :\n print(\"\\CTransaction from GID: \" + str(sender_gid) + \", Transaction \" + hash + \" not confirmed after 30 minutes.\")",
"def tx_fifo(self) -> int:\n ...",
"async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)\n\n # lets the transaction queue processing run before ending the test\n await asyncio.sleep(1)",
"def on_transaction_finish(self):\n print(\"Transaction successful\")",
"def svn_txdelta_send_contents(*args) -> \"svn_error_t *\":\n return _delta.svn_txdelta_send_contents(*args)",
"def user32_DdeClientTransaction(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pData\", \"cbData\", \"hConv\", \"hszItem\", \"wFmt\", \"wType\", \"dwTimeout\", \"pdwResult\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def broadcast_txn(self, txn):\n if not reactor.running:\n reactor.callWhenRunning(self.broadcast_txn, txn)\n reactor.run()\n else:\n for addr in list(self.bcnode.rpc_peers.values()):#[:2]:\n reactor.callFromThread(self.send_txn, addr, txn)",
"def testGetFifoTransactionManagerTransaction(self):\n class Request: pass\n self._queue_manager.reset()\n handle = Request()\n handle.transaction_id = self._queue_manager.getNextTID()\n handle.message = b\"testing\"\n self._queue_manager.addTransaction(handle)\n result = self._queue_manager.getTransaction(handle.transaction_id)\n self.assertEqual(handle.message, result.message)",
"def test_blockTxCmdsProcessing(self):\n return # skip this test \n \n self.comm.commStartTime = self.nodeParams.clock.getTime() - 1.0\n blockReqID = random.randint(1,255) # just a random \"unique\" number \n startTime = int(self.nodeParams.clock.getTime() + 10.0)\n length = self.nodeParams.config.commConfig['maxTxBlockSize']\n txNode = 1 \n\n ## TDMACmds['BlockTxRequest']\n cmdMsg = Command(TDMACmds['BlockTxRequest'], {'blockReqID': blockReqID, 'startTime': startTime, 'length': length}, [TDMACmds['BlockTxRequest'], txNode, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime())\n \n # Process and check results \n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(len(self.comm.radio.txBuffer) == calcsize(CmdDict[TDMACmds['BlockTxRequestResponse']].packFormat) + calcsize(headers['NodeHeader']['format'])) # response sent\n assert(self.comm.blockTxStatus['blockReqID'] == blockReqID)\n assert(self.comm.blockTxStatus['status'] == TDMABlockTxStatus.pending)\n assert(self.comm.blockTxStatus['txNode'] == txNode)\n assert(self.comm.blockTxStatus['startTime'] == startTime)\n assert(self.comm.blockTxStatus['length'] == length)\n \n ## TDMACmds['BlockTxConfirmed']\n time.sleep(0.01)\n cmdMsg = Command(TDMACmds['BlockTxConfirmed'], {'blockReqID': blockReqID}, [TDMACmds['BlockTxConfirmed'], txNode, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime())\n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(self.comm.blockTxStatus['status'] == TDMABlockTxStatus.confirmed) # status updated to confirmed\n\n ## TDMACmds['BlockTxStatus']\n self.comm.resetBlockTxStatus()\n time.sleep(0.01)\n cmdMsg = Command(TDMACmds['BlockTxStatus'], {'blockReqID': blockReqID, 'startTime': startTime, 'length': length}, [TDMACmds['BlockTxStatus'], txNode, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime())\n # Check status updated\n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(len(self.comm.radio.txBuffer) == calcsize(CmdDict[TDMACmds['BlockTxRequestResponse']].packFormat) + calcsize(headers['NodeHeader']['format'])) # response sent\n assert(self.comm.blockTxStatus['blockReqID'] == blockReqID)\n assert(self.comm.blockTxStatus['status'] == TDMABlockTxStatus.confirmed)\n assert(self.comm.blockTxStatus['txNode'] == txNode)\n assert(self.comm.blockTxStatus['startTime'] == startTime)\n assert(self.comm.blockTxStatus['length'] == length)\n\n # Check status updated to confirmed if only pending\n time.sleep(0.01)\n cmdMsg = Command(TDMACmds['BlockTxStatus'], {'blockReqID': blockReqID, 'startTime': startTime, 'length': length}, [TDMACmds['BlockTxStatus'], txNode, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime()) # update command counter\n self.comm.blockTxStatus['status'] = TDMABlockTxStatus.pending\n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(self.comm.blockTxStatus['status'] == TDMABlockTxStatus.confirmed)\n\n ## TDMACmds['BlockTxRequestResponse']\n time.sleep(0.01)\n self.comm.resetBlockTxStatus()\n self.comm.blockTxStatus['txNode'] = self.nodeParams.config.nodeId # this node requested block transfer\n self.comm.blockTxStatus['status'] = 
TDMABlockTxStatus.pending\n cmdMsg = Command(TDMACmds['BlockTxRequestResponse'], {'blockReqID': blockReqID, \"accept\": True}, [TDMACmds['BlockTxRequestResponse'], 1, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime())\n print(self.nodeParams.config.nodeId)\n self.nodeParams.nodeStatus[0].present = True # mark another node as present\n self.comm.populateBlockResponseList() # create block response list\n\n # Test acceptance marked\n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(self.comm.blockTxStatus['blockResponseList'][1] == True)\n\n # Test rejection marked\n time.sleep(0.01)\n cmdMsg = Command(TDMACmds['BlockTxRequestResponse'], {'blockReqID': blockReqID, \"accept\": False}, [TDMACmds['BlockTxRequestResponse'], 1, self.nodeParams.get_cmdCounter()]).serialize(self.nodeParams.clock.getTime())\n assert(self.comm.processMsg(cmdMsg, args = {'nodeStatus': self.nodeStatus, 'comm': self.comm, 'clock': self.nodeParams.clock}) == True)\n assert(self.comm.blockTxStatus['blockResponseList'][1] == False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the Google Docs parser from the ``WAGTAILCONTENTIMPORT_GOOGLE_PARSER`` setting, defaulting to wagtail_content_import.parsers.google.GoogleDocumentParser.
|
def get_google_parser():
parser_string = get_google_parser_string()
return import_string(parser_string)
|
[
"def read_google_parser_config():\n parsers_config_list = global_config.get('parsers')\n logging.info('Will get google')\n google_config = dict()\n for parser_config in parsers_config_list:\n if parser_config.get('name') == 'google':\n google_config = copy.deepcopy(parser_config)\n return google_config",
"def get_docx_parser():\n\n parser_string = get_docx_parser_string()\n\n return import_string(parser_string)",
"def get_parser(referring_url):\n engines = _get_search_engines()\n try:\n if isinstance(referring_url, ParseResult):\n url_parts = referring_url\n else:\n url_parts = urlparse(referring_url)\n except ValueError:\n msg = \"Malformed URL '{}' could not parse\".format(referring_url)\n log.debug(msg, exc_info=True)\n # Malformed URLs\n return\n\n # First try to look up a search engine by the host name incase we have\n # a direct entry for it\n parser = engines.get(url_parts.netloc, 'nothing')\n if parser == 'nothing':\n # Now we'll try searching by lossy domain which converts\n # things like country codes for us\n parser = engines.get(_get_lossy_domain(url_parts.netloc),\n 'nothing')\n\n if parser == 'nothing':\n # no parser found\n return None\n\n return parser",
"def get_parser(referring_url):\n engines = _get_search_engines()\n url_parts = _unicode_urlparse(referring_url)\n if url_parts is None:\n return None\n\n query = _serp_query_string(url_parts)\n\n domain = url_parts.netloc\n path = url_parts.path\n engine_key = url_parts.netloc\n stripped_domain = domain[4:] if domain.startswith('www.') else None\n # Try to find a parser in the engines list. We go from most specific to\n # least specific order:\n # 1. <domain><path>\n # 2. <custom search engines>\n # 3. <domain>\n # 4. <stripped_domain>\n # The second step has some special exceptions for things like Google custom\n # search engines, yahoo and yahoo images\n if '{}{}'.format(domain, path) in engines:\n engine_key = '{}{}'.format(domain, path)\n elif domain not in engines and stripped_domain not in engines:\n if query[:14] == 'cx=partner-pub':\n # Google custom search engine\n engine_key = 'google.com/cse'\n elif url_parts.path[:28] == '/pemonitorhosted/ws/results/':\n # private-label search powered by InfoSpace Metasearch\n engine_key = 'wsdsold.infospace.com'\n elif '.images.search.yahoo.com' in url_parts.netloc:\n # Yahoo! Images\n engine_key = 'images.search.yahoo.com'\n elif '.search.yahoo.com' in url_parts.netloc:\n # Yahoo!\n engine_key = 'search.yahoo.com'\n else:\n return None\n\n return engines.get(engine_key) or engines.get(stripped_domain)",
"def get_parser(self, force=False):\n if not self.__parser or force:\n self.__parser = self._create_parser()\n return self.__parser",
"def get_parser(self, format):\n try:\n parser = self._parsers[format]\n except KeyError:\n raise ValueError(f\"{format} is not a registered format.\")\n return parser",
"def get_document_loader():\n return _default_document_loader",
"def parser(self):\r\n if self._parser is None:\r\n self._parser = cache.load_module(self.path, self.name) \\\r\n or self._load_module()\r\n return self._parser",
"def get_parser():\n parser = (\n MarkdownIt(\"commonmark\")\n .enable(\"table\")\n .use(front_matter_plugin)\n .use(myst_block_plugin)\n .use(myst_role_plugin)\n # we only need to parse block level components (for efficiency)\n .disable(\"inline\", True)\n )\n return parser",
"def get_doc(self, type_, name):\n if type_ == \"doxygen\":\n return self.doxydocs.get(name)\n if type_ == \"sphinx\":\n return self.sphinxdocs.get(name)",
"def get_parser(typ: Type[P]) -> Callable[[str], P]:\n try:\n return cast(\n Callable[[str], P],\n {\n str: parse_str,\n bool: parse_bool,\n int: parse_int,\n tuple: parse_tuple,\n list: parse_list,\n set: parse_set,\n }[typ],\n )\n except KeyError:\n raise NotImplementedError(\"Unsupported setting type: %r\", typ)",
"def getParser(format):\n if format not in parser_index:\n emsg = \"no parser for '%s' format\" % format\n raise StructureFormatError(emsg)\n pmod = parser_index[format]['module']\n import_cmd = 'from matter.Parsers import %s as pm' % pmod\n exec(import_cmd)\n return pm.getParser()",
"def get_google_flow(scheme=\"http\"):\n url = '%s://%s/google/oauth2callback' % (scheme, get_config('domain'))\n return flow_from_clientsecrets(\n 'client_secrets.json',\n scope='https://www.googleapis.com/auth/drive',\n redirect_uri=url,\n )",
"def get_google_id(cls, google_file):\n\n with open(google_file) as data_file:\n data = json.load(data_file)\n return data['doc_id']",
"def getDocument(self, docSpec):\n if isinstance(docSpec, JavaLuceneDocument):\n # already a Lucene document:\n return docSpec\n elif isinstance(docSpec, int):\n # a document ID returned by a search:\n if docSpec < 0:\n return None\n else:\n return self.fbt.getIndexReader().document(docSpec)\n elif isinstance(docSpec, str):\n docSpec = toFbtMid(docSpec)\n return self.fbt.getSubjectDoc(docSpec)\n else:\n raise Exception('cannot lookup Lucene document for docSpec' + str(docSpec))",
"def job_site_parser():\n jobSiteParser = JobSiteParser()\n\n return jobSiteParser",
"def corpus_parser(self):\n return self.tasks[self.main_task].corpus_parser",
"def parse(text: str, production_rule: str, listener) -> Optional[jsgParserVisitor]:\n error_listener = ParseErrorListener()\n lexer = jsgLexer(InputStream(text))\n lexer.addErrorListener(error_listener)\n tokens = CommonTokenStream(lexer)\n tokens.fill()\n if error_listener.n_errors:\n return None\n parser = jsgParser(tokens)\n parser.addErrorListener(error_listener)\n base_node = getattr(parser, production_rule)()\n listener_module = listener(JSGDocContext())\n listener_module.visit(base_node)\n return listener_module if not error_listener.n_errors else None",
"def _get_document_util(shipyard_conf):\n dh_client = DeckhandClientFactory(shipyard_conf).get_client()\n return DocumentValidationUtils(dh_client)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the Office Open XML parser from the ``WAGTAILCONTENTIMPORT_DOCX_PARSER`` setting, defaulting to wagtail_content_import.parsers.microsoft.DocxParser.
|
def get_docx_parser():
parser_string = get_docx_parser_string()
return import_string(parser_string)
|
[
"def get_parser(self, force=False):\n if not self.__parser or force:\n self.__parser = self._create_parser()\n return self.__parser",
"def parser(self):\r\n if self._parser is None:\r\n self._parser = cache.load_module(self.path, self.name) \\\r\n or self._load_module()\r\n return self._parser",
"def get_document_loader():\n return _default_document_loader",
"def get_parser(referring_url):\n engines = _get_search_engines()\n try:\n if isinstance(referring_url, ParseResult):\n url_parts = referring_url\n else:\n url_parts = urlparse(referring_url)\n except ValueError:\n msg = \"Malformed URL '{}' could not parse\".format(referring_url)\n log.debug(msg, exc_info=True)\n # Malformed URLs\n return\n\n # First try to look up a search engine by the host name incase we have\n # a direct entry for it\n parser = engines.get(url_parts.netloc, 'nothing')\n if parser == 'nothing':\n # Now we'll try searching by lossy domain which converts\n # things like country codes for us\n parser = engines.get(_get_lossy_domain(url_parts.netloc),\n 'nothing')\n\n if parser == 'nothing':\n # no parser found\n return None\n\n return parser",
"def getOffice(self):\n self.lock.acquire_read()\n try:\n return self._office\n finally:\n self.lock.release()",
"def get_parser(referring_url):\n engines = _get_search_engines()\n url_parts = _unicode_urlparse(referring_url)\n if url_parts is None:\n return None\n\n query = _serp_query_string(url_parts)\n\n domain = url_parts.netloc\n path = url_parts.path\n engine_key = url_parts.netloc\n stripped_domain = domain[4:] if domain.startswith('www.') else None\n # Try to find a parser in the engines list. We go from most specific to\n # least specific order:\n # 1. <domain><path>\n # 2. <custom search engines>\n # 3. <domain>\n # 4. <stripped_domain>\n # The second step has some special exceptions for things like Google custom\n # search engines, yahoo and yahoo images\n if '{}{}'.format(domain, path) in engines:\n engine_key = '{}{}'.format(domain, path)\n elif domain not in engines and stripped_domain not in engines:\n if query[:14] == 'cx=partner-pub':\n # Google custom search engine\n engine_key = 'google.com/cse'\n elif url_parts.path[:28] == '/pemonitorhosted/ws/results/':\n # private-label search powered by InfoSpace Metasearch\n engine_key = 'wsdsold.infospace.com'\n elif '.images.search.yahoo.com' in url_parts.netloc:\n # Yahoo! Images\n engine_key = 'images.search.yahoo.com'\n elif '.search.yahoo.com' in url_parts.netloc:\n # Yahoo!\n engine_key = 'search.yahoo.com'\n else:\n return None\n\n return engines.get(engine_key) or engines.get(stripped_domain)",
"def get_google_parser():\n parser_string = get_google_parser_string()\n\n return import_string(parser_string)",
"def extract_docx(self):\n txt = docx2txt.process(self.file_path)\n if txt:\n return txt.encode('ascii', 'ignore').decode(\"utf-8\").replace('\\f', ' '). \\\n replace('\\n', ' ').replace('\\t', ' ').replace(u'\\xa0', ' ').replace('\\u200b', ' ').lower()\n\n return None",
"def getCurrentDocument():\n return Document(HopperLowLevel.currentDocument())",
"def get_parser():\n parser = (\n MarkdownIt(\"commonmark\")\n .enable(\"table\")\n .use(front_matter_plugin)\n .use(myst_block_plugin)\n .use(myst_role_plugin)\n # we only need to parse block level components (for efficiency)\n .disable(\"inline\", True)\n )\n return parser",
"def get_parser(cls, name: str) -> OMDeviceParser:\n log = getLogger('om.parser')\n if not cls._DEVICE_PARSERS:\n for modname in Devices.modules:\n devmod = import_module(modname)\n for iname in dir(devmod):\n item = getattr(devmod, iname)\n if not isinstance(item, Type):\n continue\n if not issubclass(item, OMDeviceParser):\n continue\n sname = iname.replace('OM', '').replace('DeviceParser', '')\n cls._DEVICE_PARSERS[sname.lower()] = item\n # default parser\n cls._DEVICE_PARSERS[''] = OMDeviceParser\n for dev, klass in cls._DEVICE_PARSERS.items():\n log.info(\"Registered %s for %s device\", klass.__name__,\n dev.upper() if dev else 'default')\n name = name.lower()\n try:\n parser = cls._DEVICE_PARSERS[name.lower()]\n log.warning('Use %s parser for device %s', parser.__name__, name)\n return parser\n except KeyError:\n log.warning('Use default parser for device %s', name)\n return cls._DEVICE_PARSERS['']",
"def get_document(self, doc_uri: str, accept_from_file: bool) -> Optional[IDocument]:",
"def _get_reader(self, import_file, sheet_name=None):\n try:\n return ExcelParser(import_file, sheet_name)\n except XLRDError as e:\n if 'Unsupported format' in str(e):\n return CSVParser(import_file)\n elif 'No sheet named' in str(e):\n raise SheetDoesNotExist(str(e))\n else:\n raise Exception('Cannot parse file')",
"def parser_for(cls, parser_name):\n return parser_name == 'generic_csvxls'",
"def get_parser(self, format):\n try:\n parser = self._parsers[format]\n except KeyError:\n raise ValueError(f\"{format} is not a registered format.\")\n return parser",
"def open_document(self, file_name):\n import uno\n file_url = uno.systemPathToFileUrl(abspath(file_name))\n\n if os.environ.get('OSTYPE', False) == 'FreeBSD':\n # Workaround a problemas con OpenOffice 3.1 en FreeBSD\n file_url = file_url.encode('UTF-8')\n\n load_properties = { \"Hidden\": True }\n file_ext = splitext(file_name)[1]\n file_ext = file_ext and file_ext[1:].lower() or None\n if self.IMPORT_FILTER_MAP.has_key(file_ext):\n load_properties.update(self.IMPORT_FILTER_MAP[file_ext])\n\n try:\n document = self.desktop.loadComponentFromURL(file_url, \"_blank\", 0, self.make_properties(load_properties))\n except Exception, ex:\n raise OOHelperException(_(\"Error loading file %s with OpenOffice: %s\") % (file_name, ex))\n try:\n document.refresh()\n except AttributeError:\n #print \"Warning: Ignoring AttributeError on document refresh\"\n pass\n\n return document",
"def get_manager():\n if not hasattr(DocumentManager, '_instance'):\n DocumentManager._instance = DocumentManager(archive=archive)\n log.info(\"Loaded ingestors: %r\", DocumentManager._instance.ingestors)\n return DocumentManager._instance",
"def get_python_parser(version, debug_parser):\n if version < 3.0:\n import uncompyle6.parsers.parse2 as parse2\n p = parse2.Python2Parser(debug_parser)\n else:\n import uncompyle6.parsers.parse3 as parse3\n p = parse3.Python3Parser(debug_parser)\n p.version = version\n return p",
"def GetDocument(self):\n if self.parent:\n if isinstance(self.parent, Document):\n return self.parent\n else:\n return self.parent.GetDocument()\n else:\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns confirmed infection cases for country 'Poland' given a date. Ex. >>> poland_cases_by_date(7, 3, 2020) 5 >>> poland_cases_by_date(11, 3) 31
|
def poland_cases_by_date(day: int, month: int, year: int = 2020) -> int:
    # confirmed_cases columns are labelled with dates in M/D/YY form, e.g. '3/7/20'.
    y = year % 100
    return confirmed_cases.loc[confirmed_cases["Country/Region"] == "Poland"][f'{month}/{day}/{y}'].values[0]
|
[
"def query_cases_by_date(self, start, end):\n start_str = start.strftime(\"%m-%d-%Y\")\n end_str = end.strftime(\"%m-%d-%Y\")\n date_query_url = \"https://{}GetRecentCivilCases/{}/{}\".format(\n self.api_base,\n start_str,\n end_str,\n )\n r = self.session.get(date_query_url)\n cases = r.json()[\"ResultList\"]\n\n # Normalize the date data\n normal_cases = []\n for case in cases:\n clean_case = {\n \"internal_case_id\": case[\"InternalCaseID\"],\n \"judge_code\": case[\"JudgeCode\"] or \"\",\n \"case_type_code\": case[\"CaseTypeCode\"] or \"\",\n }\n normal_cases.append(clean_case)\n\n return normal_cases",
"def collect_cases(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n cases = set()\n for weekday in self.__get_date_range(start_date, end_date):\n cases |= self.get_court_cases(weekday)\n return cases",
"def get_court_cases(self, date_type):\n raise NotImplementedError",
"def no_new_cases_count(day: int, month: int, year: int = 2020) -> int:\n \n # Your code goes here (remove pass)\n date_now = datetime.date(year, month, day)\n date_prev = date_now - datetime.timedelta(days=1)\n pattern = '%#m/%#d/%y'\n \n num_of_countries = confirmed_cases.count()['Country/Region']\n num__with_new_cases = confirmed_cases[confirmed_cases[date_now.strftime(pattern)] == confirmed_cases[date_prev.strftime(pattern)]].count()['Country/Region']\n return num_of_countries - num__with_new_cases",
"def top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:\n\n # Your code goes here (remove pass)\n y = year % 100\n data=f'{month}/{day}/{y}'\n top = confirmed_cases.groupby([\"Country/Region\"]).max().sort_values(by=data).tail(5).iloc[:,0].keys().tolist()[::-1]\n return top",
"def get_stats(self,date):\n result = []\n for province_name in self.provinces:\n result.append(COVIDStats(date = date,\n place_name = province_name,\n confirmados = self.df_provinces.loc['CONFIRMADOS'].loc[province_name][date],\n muertos = self.df_provinces.loc['MUERTOS'].loc[province_name][date],\n recuperados = self.df_provinces.loc['RECUPERADOS'].loc[province_name][date],\n activos = self.df_provinces.loc['ACTIVOS'].loc[province_name][date]))\n return result",
"def get_covid_cases_by_country(country):\n try:\n response = requests.get(API_URL)\n response_json = json.loads(response.text)\n\n # The API undergoes caching periodically,\n # during which it is unavailable.\n if response_json['Message'] == 'Caching in progress':\n return 'API Caching in progress, please try again later.'\n\n # Search for the country in the summary response\n country_data = next((data for data in response_json['Countries'] if data['Country'].lower() == country.lower()), {})\n\n if country_data:\n # Return the stats if the country is found\n covid_info = '\\\n COVID-19 Cases for {Country} ({CountryCode}) - New Confirmed Cases: {NewConfirmed}, \\\n Total Confirmed Cases: {TotalConfirmed}, \\\n New Deaths: {NewDeaths}, \\\n Total Deaths: {TotalDeaths}, \\\n New Recovered Cases: {NewRecovered}, \\\n Total Recovered Cases: {TotalRecovered}'\n \n # Remove any extra whitespace and fill the string\n # with keys from the data dictionary\n return re.sub(r\"\\s\\s+\" , \" \", covid_info.format(**country_data))\n\n else:\n return 'Invalid Country Name'\n\n except Exception as e:\n print(e)\n return 'Error Fetching COVID-19 Data'",
"def process(df):\n dates = df['date'].values\n cases = df['cases'].values\n dates = dates[cases > 0 ]\n cases = cases[cases > 0 ]\n cases = np.cumsum(cases)\n # use preprocess cases functions here\n cases = ratioincrease(cases, timelag = 14)\n # cases = minmax(cases)\n\n return dates, cases",
"def get_new_cases(case='Confirmed'):\n case = case.title()\n data = load_data()\n new = {}\n for country1 in data[list(data)[-1]]:\n for country2 in data[list(data)[-2]]:\n if country1['Country_Region'] == country2['Country_Region']:\n if country1[case] > country2[case]:\n new[country1['Country_Region']]=(country1[case] - country2[case])\n return {k:v for k, v in sorted(new.items(), key=lambda i: i[1], reverse=True)}",
"def national_covid_cases() -> str:\n national_api = Cov19API(\n filters=[\"areaName=England\"],\n structure=covid_struct[\"cases_and_deaths\"],\n latest_by=\"newCasesByPublishDate\"\n )\n response = requests.get(\"https://api.coronavirus.data.gov.uk/v1/data\", params=national_api.api_params, timeout=10)\n if response.status_code != 200:\n logging.error(response.json()['response'])\n return \"An error has occurred, see logging for more details.\"\n national_data = national_api.get_json()[\"data\"][0]\n local_data = api.get_json()[\"data\"][0]\n national_new_cases = national_data['newCasesByPublishDate']\n local_new_cases = local_data['newCasesByPublishDate']\n date = national_data[\"date\"]\n return \"On \" + str(date) + \" there are \" + str(national_new_cases) + \" new cases of covid_alarm_clock-19 in England and \" + \\\n str(local_new_cases) + \" in Exeter.\"",
"def search_by_date(\n self,\n start_date=None,\n end_date=None,\n case_details=False,\n case_types=[],\n download_dir=None,\n headless=True\n ):\n if not start_date:\n start_date, end_date = self.current_day, self.current_day\n results = []\n county = self.place_id[3:] # Clip the state prefix from place_id\n if case_details:\n results = self.search(\n start_date=start_date,\n end_date=end_date,\n case_types=case_types,\n download_dir=download_dir or self.get_download_dir(),\n headless=headless\n )\n else:\n # Case metadata can be gathered using just Requests\n date_format = \"%m-%d-%Y\"\n dates = dates_for_range(start_date, end_date, output_format=date_format)\n for date_str in dates:\n api = SearchApi(county)\n extra_params = {}\n if case_types:\n extra_params['caseType'] = ','.join(case_types)\n cases = api.search_by_filing_date(\n date_str,\n date_str,\n extra_params\n )\n results.extend(cases)\n return results",
"def nyt_cases_counties(df):\n # Cast date as datetime\n df['date'] = pd.to_datetime(df['date'])\n # Drop records with county = 'Unknown' or no FIPs code\n df = df.loc[(df['county'] != 'Unknown') & (df['fips'].notnull())].copy()\n # Store FIPS codes as standard 5 digit strings\n df['fips'] = _fips_cleaner(df['fips'])\n # Drop FIPs that are not part of US states, cast deaths to int\n df = df.loc[df['fips'].str.slice(0,2) <= '56'].copy()\n df['deaths'] = df['deaths'].astype(int)\n return df",
"def GetCombinationsForDate(date):\n dates = []\n # Thursday\n thursday = date\n # Friday\n friday = thursday + datetime.timedelta(days=1)\n # Sunday\n sunday = friday + datetime.timedelta(days=2)\n # Monday\n monday = sunday + datetime.timedelta(days=1)\n # Tuesday\n tuesday = monday + datetime.timedelta(days=1)\n dates.append([thursday.strftime(\"%Y-%m-%d\"), sunday.strftime(\"%Y-%m-%d\")])\n dates.append([friday.strftime(\"%Y-%m-%d\"), sunday.strftime(\"%Y-%m-%d\")])\n dates.append([thursday.strftime(\"%Y-%m-%d\"), monday.strftime(\"%Y-%m-%d\")])\n dates.append([friday.strftime(\"%Y-%m-%d\"), monday.strftime(\"%Y-%m-%d\")])\n dates.append([thursday.strftime(\"%Y-%m-%d\"), tuesday.strftime(\"%Y-%m-%d\")])\n dates.append([friday.strftime(\"%Y-%m-%d\"), tuesday.strftime(\"%Y-%m-%d\")])\n\n return dates",
"def local_covid_cases() -> None:\n engine3 = pyttsx3.init()\n response = requests.get(\"https://api.coronavirus.data.gov.uk/v1/data\", params=api.api_params, timeout=10)\n if response.status_code != 200:\n logging.error(response.json()['response'])\n return None\n data = api.get_json()[\"data\"][0]\n date = data[\"date\"]\n new_cases = data['newCasesByPublishDate']\n engine3.say(\"On \" + str(date) + \" there are \" + str(new_cases) + \" new cases of covid_alarm_clock-19 in \" + location[\"my-city\"])\n engine3.runAndWait()\n # Log alarm\n logging.info(\"Local covid_alarm_clock cases were announced at: \" + time.strftime(\"%H:%M\", time.localtime()))",
"def get_attacks_info_by_date(self, date):",
"def rates_by_date(rate_date: str) -> Any:\n\n time.sleep(randint(1, 5) * 0.055)\n\n for rate in rates:\n\n if rate[\"Date\"] == rate_date:\n\n base_country = request.args.get(\"base\", \"EUR\")\n\n if \"symbols\" in request.args:\n country_symbols = request.args[\"symbols\"].split(\",\")\n else:\n country_symbols = [col for col in rate if col != \"Date\"]\n\n country_rates = {\n country_code: country_rate / rate[base_country]\n for (country_code, country_rate) in rate.items()\n if country_code != \"Date\" and\n country_code in country_symbols and\n not math.isnan(country_rate)\n }\n\n return jsonify({\n \"date\": rate[\"Date\"],\n \"base\": base_country,\n \"rates\": country_rates,\n })\n\n abort(404)",
"def test_filter_start_date_today():\n data = test_base.load_test_data()\n filtered_data = challenge1_1.filter_by_start_date(data, \"2018-05-28\")\n assert filtered_data.count()[0] == 1",
"def get_ny_covid_data_by_date(test_date):\n json_data = \"\"\n # Call New York Api to get data\n result = requests.get(NY_API, test_date)\n if result.status_code == 200:\n print(\"Call to api successful\")\n if not result.json():\n raise ValueError(\n \"Data not available for \" + test_date['test_date'] + \" yet. Please try after sometime\")\n # Getting json data from response\n json_data = result.json()\n return json_data\n else:\n print(\"Error in Api Call\")\n return json_data",
"def precip_check(p_dates, p_vals, at_dates, at_vals, interval, cutoff):\n bad_val = 6999\n o_val = []\n index = 0\n while (index < len(p_dates)):\n if not(p_dates[index] == at_dates[index]):\n print_center(\"dates do not match for precipitation and air temp\"\n ,'*')\n exit_on_failure()\n # how to handle this?\n print (p_dates[index], p_vals[index], at_dates[index], at_vals[index] )\n date = p_dates[index].replace(year = 1004)\n if not(csvd.is_in_interval(date, interval)):\n o_val.insert(index, bad_val)\n\n elif (at_vals[index] < cutoff):\n o_val.insert(index, bad_val)\n else:\n o_val.insert(index, p_vals[index])\n index += 1\n\n return o_val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the top 5 infected countries given a date (confirmed cases). Ex. >>> top5_countries_by_date(27, 2, 2020) ['China', 'Korea, South', 'Cruise Ship', 'Italy', 'Iran'] >>> top5_countries_by_date(12, 3) ['China', 'Italy', 'Iran', 'Korea, South', 'France']
|
def top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:
    # confirmed_cases columns are labelled with dates in M/D/YY form, e.g. '2/27/20'.
    y = year % 100
    date_col = f'{month}/{day}/{y}'
    # Sort by the date column, take the top five rows, and return the country names largest-first.
    top = confirmed_cases.groupby(["Country/Region"]).max().sort_values(by=date_col).tail(5).iloc[:, 0].keys().tolist()[::-1]
    return top
|
[
"def get_top_five_countries():\r\n\r\n lines = country_pop.split('\\n')\r\n co = []\r\n for line in lines:\r\n country= line.split('\\t')\r\n co.append(country)\r\n\r\n\r\n topfive= []\r\n for i in co[1:6]:\r\n topfive.append(i[1])\r\n return topfive",
"def compute_names_by_map_set_country(iterable, c_code, top_x=3):\n dic = {}\n country_dic = {}\n for stats in iterable:\n if stats[0] in dic:\n dic[stats[0]] += 1\n else:\n dic[stats[0]] = 1\n # Remember city names that exist in given country.\n if stats[1] == c_code:\n country_dic[stats[0]] = 1\n # Check if dictionary is empty.\n if not dic:\n return []\n # Sort descending and return.\n sorted_dic_list = []\n for k, v in sorted(dic.items(), key=lambda dic: dic[1], reverse=True):\n if k in country_dic:\n sorted_dic_list.append((k, v))\n return sorted_dic_list[:top_x]",
"def date_top_industries(prices, sector, date, top_n):\n # TODO: Implement Function\n\n return set(sector.loc[prices.loc[date].nlargest(top_n).index])",
"def available_dates(dates=List, n=int, countries=List, lang=str):\n\n missing = Counter(countries) if countries != 'nogeo' else None\n rest = []\n dates = dates[::-1]\n while len(dates) and (len(rest) < n or n == -1):\n day = dates.pop()\n flag = True\n iter = missing.most_common() if missing is not None else [[None, None]]\n for country, _ in iter:\n try:\n download_tokens(day, lang=lang, \n country=country if country is not None else 'nogeo')\n except Exception:\n flag = False\n if missing is not None:\n missing.update([country]) \n break\n if flag:\n rest.append(day)\n return rest",
"def get_top_expenses_data(date, next_date):\n data = []\n\n if date is None:\n expenses = Expense.objects().order_by('-amount').limit(10)\n else:\n expenses = []\n num = 1\n for expense in Expense.objects().order_by('-amount'):\n if expense.date >= date and expense.date <= next_date and num <= 10:\n expenses.append(expense)\n num += 1\n\n for expense in expenses:\n data.append({\n 'name': expense.name,\n 'amount': expense.amount,\n 'date': expense.date\n })\n\n return data",
"def top_cities():\n day = request.args.get('day', datetime.now().strftime(\"%Y%m%d\"), type=str)\n num = request.args.get('num', 1, type=int)\n\n # input validation\n if num <= 1:\n num = 1\n\n try:\n datetime.strptime(day, '%Y%m%d')\n except ValueError:\n day = datetime.now().strftime(\"%Y%m%d\") # when missing, set today as default date\n\n return jsonify(get_top_cities(day, num))",
"def get_customer_features(username, country, min_date, max_date):\n \n db=create_connection(username)\n country=country.title()\n\n query=f'select * from tuscany.customer_feature cus \\\n join tuscany.mcc mc \\\n on mc.mcc=cus.mcc \\\n where mc.country={country!r} \\\n and (cus.st_time between {min_date!r} and {max_date!r}) \\\n and cus.customer_id not in (select customer_id from tuscany.excluded_customers)'\n\n df = db.sql_query_to_data_frame(query, cust_id=True)\n df2 = digitalize_features(df)\n return df2",
"def get_stats(self,date):\n result = []\n for province_name in self.provinces:\n result.append(COVIDStats(date = date,\n place_name = province_name,\n confirmados = self.df_provinces.loc['CONFIRMADOS'].loc[province_name][date],\n muertos = self.df_provinces.loc['MUERTOS'].loc[province_name][date],\n recuperados = self.df_provinces.loc['RECUPERADOS'].loc[province_name][date],\n activos = self.df_provinces.loc['ACTIVOS'].loc[province_name][date]))\n return result",
"def rates_by_date(rate_date: str) -> Any:\n\n time.sleep(randint(1, 5) * 0.055)\n\n for rate in rates:\n\n if rate[\"Date\"] == rate_date:\n\n base_country = request.args.get(\"base\", \"EUR\")\n\n if \"symbols\" in request.args:\n country_symbols = request.args[\"symbols\"].split(\",\")\n else:\n country_symbols = [col for col in rate if col != \"Date\"]\n\n country_rates = {\n country_code: country_rate / rate[base_country]\n for (country_code, country_rate) in rate.items()\n if country_code != \"Date\" and\n country_code in country_symbols and\n not math.isnan(country_rate)\n }\n\n return jsonify({\n \"date\": rate[\"Date\"],\n \"base\": base_country,\n \"rates\": country_rates,\n })\n\n abort(404)",
"def get_coverage_info(fuzzer, date=None):\n query = data_types.CoverageInformation.query(\n data_types.CoverageInformation.fuzzer == fuzzer)\n if date:\n # Return info for specific date.\n query = query.filter(data_types.CoverageInformation.date == date)\n else:\n # Return latest.\n query = query.order(-data_types.CoverageInformation.date)\n\n return query.get()",
"def get_summary(date):\r\n \r\n date2 = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=7)\r\n date2 = datetime.datetime.strftime(date2, '%Y-%m-%d')\r\n \r\n q_string = \"\"\"\r\n\tSELECT cid, state_code, SUM(delegates_won)\r\n FROM summary\r\n WHERE sdate <= '{}' AND sdate >= '{}'\r\n GROUP BY cid, state_code;\r\n \"\"\".format(date, date2)\r\n try:\r\n cursor.execute(q_string)\r\n result = cursor.fetchall()\r\n except:\r\n print(\"ERROR: Could not fetch summary data\")\r\n sys.exit()\r\n\r\n # Parse and transform into list.\r\n summary_list = []\r\n for tup in result:\r\n summary_list.append([\"{}\".format(tup[0]), \"{}\".format(tup[1]), \r\n \"{}\".format(tup[2])])\r\n \r\n # Convert to pandas dataframes\r\n summary = pandas.DataFrame.from_records(summary_list)\r\n summary.columns = ['cid', 'state_code', 'delegates_won']\r\n summary['cid'] = summary['cid'].astype(int)\r\n summary['state_code'] = summary['state_code'].astype(str)\r\n summary['delegates_won'] = summary['delegates_won'].astype(int)\r\n \r\n return summary",
"def get_attacks_info_by_date(self, date):",
"def get_ny_covid_data_by_date(test_date):\n json_data = \"\"\n # Call New York Api to get data\n result = requests.get(NY_API, test_date)\n if result.status_code == 200:\n print(\"Call to api successful\")\n if not result.json():\n raise ValueError(\n \"Data not available for \" + test_date['test_date'] + \" yet. Please try after sometime\")\n # Getting json data from response\n json_data = result.json()\n return json_data\n else:\n print(\"Error in Api Call\")\n return json_data",
"def get_country_names(df):\n cols = list(df.columns)\n cols.pop(0) # remove Date\n return cols",
"def get_data_from_country(country_name: str) -> list:\n\n url = f\"https://api.covid19api.com/total/dayone/country/{country_name}\"\n\n payload = {}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n data = response.json()\n\n return data",
"def get_ice_cover(from_date, to_date, region_ids=None, location_id=None, countries=None, time_zone=None,\n group_id=None, observer_ids=None, observer_nick=None, observer_competence=None,\n output='List', lang_key=1):\n\n return _get_general(51, from_date=from_date, to_date=to_date, region_ids=region_ids, location_id=location_id,\n countries=countries, time_zone=time_zone, group_id=group_id, observer_ids=observer_ids,\n observer_nick=observer_nick, observer_competence=observer_competence, output=output,\n lang_key=lang_key)",
"def list_of_countries(country_data: List[CountryTemperature], year: int) -> set:\r\n return {row.country for row in country_data if int(row.date.strftime(\"%Y\")) > year and row.temperature is not None}",
"def get_covid_cases_by_country(country):\n try:\n response = requests.get(API_URL)\n response_json = json.loads(response.text)\n\n # The API undergoes caching periodically,\n # during which it is unavailable.\n if response_json['Message'] == 'Caching in progress':\n return 'API Caching in progress, please try again later.'\n\n # Search for the country in the summary response\n country_data = next((data for data in response_json['Countries'] if data['Country'].lower() == country.lower()), {})\n\n if country_data:\n # Return the stats if the country is found\n covid_info = '\\\n COVID-19 Cases for {Country} ({CountryCode}) - New Confirmed Cases: {NewConfirmed}, \\\n Total Confirmed Cases: {TotalConfirmed}, \\\n New Deaths: {NewDeaths}, \\\n Total Deaths: {TotalDeaths}, \\\n New Recovered Cases: {NewRecovered}, \\\n Total Recovered Cases: {TotalRecovered}'\n \n # Remove any extra whitespace and fill the string\n # with keys from the data dictionary\n return re.sub(r\"\\s\\s+\" , \" \", covid_info.format(**country_data))\n\n else:\n return 'Invalid Country Name'\n\n except Exception as e:\n print(e)\n return 'Error Fetching COVID-19 Data'",
"def poland_cases_by_date(day: int, month: int, year: int = 2020) -> int:\n \n # Your code goes here (remove pass)\n \n y = year % 100\n return confirmed_cases.loc[confirmed_cases[\"Country/Region\"]==\"Poland\"][f'{month}/{day}/{y}'].values[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the number of countries/regions where the infection count in a given day was the same as the previous day. Ex. >>> no_new_cases_count(11, 2, 2020) 35 >>> no_new_cases_count(3, 3) 57
|
def no_new_cases_count(day: int, month: int, year: int = 2020) -> int:
    date_now = datetime.date(year, month, day)
    date_prev = date_now - datetime.timedelta(days=1)
    # '%#m/%#d/%y' drops leading zeros in strftime on Windows; on Linux/macOS the equivalent flag is '-', i.e. '%-m/%-d/%y'.
    pattern = '%#m/%#d/%y'

    num_of_countries = confirmed_cases.count()['Country/Region']
    num_with_new_cases = confirmed_cases[confirmed_cases[date_now.strftime(pattern)] == confirmed_cases[date_prev.strftime(pattern)]].count()['Country/Region']
    return num_of_countries - num_with_new_cases
|
[
"def nyt_cases_counties(df):\n # Cast date as datetime\n df['date'] = pd.to_datetime(df['date'])\n # Drop records with county = 'Unknown' or no FIPs code\n df = df.loc[(df['county'] != 'Unknown') & (df['fips'].notnull())].copy()\n # Store FIPS codes as standard 5 digit strings\n df['fips'] = _fips_cleaner(df['fips'])\n # Drop FIPs that are not part of US states, cast deaths to int\n df = df.loc[df['fips'].str.slice(0,2) <= '56'].copy()\n df['deaths'] = df['deaths'].astype(int)\n return df",
"def calculate_cases_number(self):\n self.cases_number = self.dataframe[\"case:concept:name\"].nunique()",
"def dayCount(self):\n return len(self._days)",
"def getTodaysCount(self):\n today = DateTime().strftime('%Y%m%d')\n \n return self.day_count.get(today, 0)",
"def case_count(self):\n nr_cases = 0\n case_call = self.get_command()\n case_call.extend([\"cases\", \"--count\"])\n output = \"\"\n try:\n output = execute_command(case_call)\n except CalledProcessError:\n LOG.warning(\"Something went wrong with loqus\")\n return nr_cases\n\n try:\n nr_cases = int(output.strip())\n except ValueError:\n pass\n\n return nr_cases",
"def get_new_cases(case='Confirmed'):\n case = case.title()\n data = load_data()\n new = {}\n for country1 in data[list(data)[-1]]:\n for country2 in data[list(data)[-2]]:\n if country1['Country_Region'] == country2['Country_Region']:\n if country1[case] > country2[case]:\n new[country1['Country_Region']]=(country1[case] - country2[case])\n return {k:v for k, v in sorted(new.items(), key=lambda i: i[1], reverse=True)}",
"def get_county_number():\n return len(get_counties())",
"def get_country_count():\r\n\r\n lines = country_pop.split('\\n')\r\n return len(lines)-1\r\n return len(country_pop.split('\\n'))-1",
"def get_hours_over_8_count(self):\n count = 0\n for day in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']:\n day_count = 0\n for assignment in self._schedule:\n if assignment.day == day:\n day_count += assignment.end - assignment.start\n count += max(0, day_count - 8)\n return count",
"def get_number_of_indels(self):\n indel_counter = 0\n with open(file_1) as my_vcf_fh:\n vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n if record.is_indel:\n indel_counter += 1\n return indel_counter",
"def getNumDays(self, curDateTime, expDateTime):\n return (expDateTime - curDateTime).days",
"def _get_num_closed_tix(self, from_date, at_date, req, ticketFilter=\"\"):\n\n status_map = {\n 'new': 0,\n 'reopened': 0,\n 'assigned': 0,\n 'closed': 1,\n 'edit': 0\n }\n\n count = 0\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n cursor.execute(\"\"\"\n SELECT t.id, tc.field, tc.time, tc.oldvalue, tc.newvalue,\n t.priority\n FROM ticket_change tc\n INNER JOIN ticket t ON t.id = tc.ticket\n INNER JOIN enum p ON p.name = t.priority AND p.type = 'priority'\n WHERE tc.time > %s AND tc.time <= %s %s\n ORDER BY tc.time\n \"\"\" % (to_timestamp(from_date), to_timestamp(at_date),\n ticketFilter))\n\n for tid, field, time, old, status, priority in cursor:\n if field == 'status':\n if status in ('new', 'assigned', 'reopened', 'closed', 'edit'):\n count += status_map[status]\n\n return count",
"def infections_by_time(infected_count, days):\n\n return infected_count * (2**(days//3))",
"def get_covid_cases_by_country(country):\n try:\n response = requests.get(API_URL)\n response_json = json.loads(response.text)\n\n # The API undergoes caching periodically,\n # during which it is unavailable.\n if response_json['Message'] == 'Caching in progress':\n return 'API Caching in progress, please try again later.'\n\n # Search for the country in the summary response\n country_data = next((data for data in response_json['Countries'] if data['Country'].lower() == country.lower()), {})\n\n if country_data:\n # Return the stats if the country is found\n covid_info = '\\\n COVID-19 Cases for {Country} ({CountryCode}) - New Confirmed Cases: {NewConfirmed}, \\\n Total Confirmed Cases: {TotalConfirmed}, \\\n New Deaths: {NewDeaths}, \\\n Total Deaths: {TotalDeaths}, \\\n New Recovered Cases: {NewRecovered}, \\\n Total Recovered Cases: {TotalRecovered}'\n \n # Remove any extra whitespace and fill the string\n # with keys from the data dictionary\n return re.sub(r\"\\s\\s+\" , \" \", covid_info.format(**country_data))\n\n else:\n return 'Invalid Country Name'\n\n except Exception as e:\n print(e)\n return 'Error Fetching COVID-19 Data'",
"def get_num_attacks_per_day():",
"def get_total_cured_discharged_cases(self):\n parsed_data = self.__get_response()\n cured_discharged_cases_section = parsed_data.find(\"li\", {\"class\": \"bg-green\"}).find_all(\"strong\", {\"class\": \"mob-hide\"})[1]\n total_cured_discharged_cases = str(cured_discharged_cases_section.text).split()[0]\n return int(total_cured_discharged_cases)",
"def national_covid_cases() -> str:\n national_api = Cov19API(\n filters=[\"areaName=England\"],\n structure=covid_struct[\"cases_and_deaths\"],\n latest_by=\"newCasesByPublishDate\"\n )\n response = requests.get(\"https://api.coronavirus.data.gov.uk/v1/data\", params=national_api.api_params, timeout=10)\n if response.status_code != 200:\n logging.error(response.json()['response'])\n return \"An error has occurred, see logging for more details.\"\n national_data = national_api.get_json()[\"data\"][0]\n local_data = api.get_json()[\"data\"][0]\n national_new_cases = national_data['newCasesByPublishDate']\n local_new_cases = local_data['newCasesByPublishDate']\n date = national_data[\"date\"]\n return \"On \" + str(date) + \" there are \" + str(national_new_cases) + \" new cases of covid_alarm_clock-19 in England and \" + \\\n str(local_new_cases) + \" in Exeter.\"",
"def get_total_death_cases(self):\n parsed_data = self.__get_response()\n death_cases_section = parsed_data.find(\"li\", {\"class\": \"bg-red\"}).find_all(\"strong\", {\"class\": \"mob-hide\"})[1]\n total_death_cases = str(death_cases_section.text).split()[0]\n return int(total_death_cases)",
"def count_gom_existing_events(jsonpath):\n f = open(jsonpath, 'r')\n data = json.load(f)\n events_array = data[\"Observation\"][\"Events\"][\"Event\"]\n #plus 1 for the latest event\n return len(events_array)+1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method that performs optimization using simulated annealing.
|
def run_optimization(self, f, parameters, constraints=None):
    assert constraints is None, "Simulated Annealing optimizer cannot handle constraints."
print("!=================================================================================!")
print("! STARTING SIMULATED ANNEALING OPTIMIZER !")
print("!=================================================================================!")
# Initial temperature
t_init = - 1.0 / np.log(self._p_init)
# Final temperature
t_final = - 1.0 / np.log(self._p_final)
# Fractional reduction every cycle
frac = (t_final / t_init) ** (1.0 / (self._n_iter - 1.0))
temp = t_init
# Choose random seed for the process
np.random.seed(np.random.randint(2 ** 32 - 1))
n_param = len(parameters)
# First objective function minimization
error, old_f = f(parameters)
best_f = old_f
best_parameters = copy.deepcopy(parameters)
for ntemp in range(self._n_iter):
# Initiate another MC optimization at a given temperature
acc = [0.0 for p in range(n_param)]
p_max = [copy.deepcopy(parameters[p]) * 0.5 for p in range(n_param)]
print("Starting new temperature...")
for sweep in range(1, 100):
p_max = [p_max[p] * ((acc[p] / float(sweep) - self._avg_acceptance_rate) + 1) for p in range(n_param)]
# parameters_temp = copy.deepcopy(parameters)
for n in range(n_param):
# Create neighbour solution
p = np.random.randint(0, n_param) # Select randomly a parameter
p_dummy = copy.deepcopy(parameters[p])
parameters[p] += np.random.uniform(-p_max[p], p_max[p])
error, new_f = f(parameters)
delta_f = new_f - old_f
if delta_f < 0:
if new_f < best_f:
best_f = new_f
best_parameters = copy.deepcopy(parameters)
old_f = new_f
acc[p] += 1.0
print("\nMC move accepted (delta_f < 0).")
print("Error: ", error)
print("Objective function value: {}".format(new_f))
else:
prob = np.exp(- (new_f - old_f) / temp)
if prob > np.random.random():
old_f = new_f
acc[p] += 1.0
print("\n MC move accepted (Metropolis).")
print("Error: ", error)
print("Objective function value: {}".format(new_f))
else:
parameters[p] = p_dummy
# print(np.sqrt(np.sum(( (np.asarray(parameters_temp)-np.asarray(parameters)) / np.asarray(parameters_temp) )**2) / n_param))
# Lower the temperature for next cycle
temp = temp * frac
print("Acceptance rate: " + str(sum(acc) / ((sweep) * n_param)))
print("Convergence was achieved after {} MC sweeps.".format(sweep))
print("Last objective function value is {} .".format(new_f))
print("!=================================================================================!")
print("! SIMULATED ANNEALING OPTIMIZER TERMINATED SUCCESSFULLY! :) !")
print("!=================================================================================!")
return best_parameters
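    # Illustrative aside (not from the original source): the two acceptance branches above
    # implement the standard Metropolis criterion. A standalone sketch of that rule:
    #
    #     def metropolis_accept(delta_f, temp):
    #         # always accept improvements; accept worse moves with probability exp(-delta_f / temp)
    #         return delta_f < 0 or np.exp(-delta_f / temp) > np.random.random()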
|
[
"def optimize(self):\n raise NotImplementedError",
"def solve_sa(f, n=numpy.inf, m=numpy.inf, verbose=True):\n\n print 'Running simulated annealing...',\n if n < numpy.inf: print 'for %.f steps' % n\n if m < numpy.inf: print 'for %.2f minutes' % m\n\n t = time.time()\n a, k, s, v = 0, 0, 0, 1e-10\n best_obj, best_soln = -numpy.inf, None\n curr_soln = binary.product.ProductBinary.uniform(d=f.d).rvs()\n curr_obj = f.lpmf(curr_soln)\n\n while True:\n\n k += 1\n\n # update break criterion\n if n is numpy.inf:\n r = (time.time() - t) / (60.0 * m)\n else:\n r = k / float(n)\n\n # show progress bar\n if verbose:\n if r - s >= 0.01:\n utils.auxi.progress(r, 'ar: %.3f, obj: %.1f, time %s' % (a / float(k), best_obj, utils.auxi.time(time.time() - t)))\n s = r\n\n if r > 1:\n if verbose: utils.auxi.progress(1.0, ' obj: %.1f, time %s' % (best_obj, utils.auxi.time(time.time() - t)))\n break\n\n # generate proposal\n proposal = curr_soln.copy()\n index = numpy.random.randint(0, f.d)\n proposal[index] = proposal[index] ^ True\n proposal_obj = f.lpmf(proposal)\n\n if best_obj < proposal_obj:\n best_obj = proposal_obj\n best_soln = proposal.copy()\n\n if (proposal_obj - curr_obj) * v > numpy.log(numpy.random.random()):\n a += 1\n curr_soln = proposal\n curr_obj = proposal_obj\n\n if a / float(k) < (r + 1) ** -5: v *= 0.995\n else: v *= 1.005\n\n if verbose: print\n return {'obj' : best_obj, 'soln' : best_soln, 'time' : time.time() - t}",
"def annealingoptimize(domain, costf, T=10000.0, cool=0.95, step=1):\n # Initialize the value randomly\n vec = [int(random.randint(domain[i][0], domain[i][1]))\n for i in range(len(domain))]\n # changed float to int: [7, 1, 6, 5, 6, 8, 7, 8, 0, 3, 1, 4]\n\n while T > 0.1:\n # Choose one of the indices\n i = random.randint(0, len(domain) - 1) # pick randomly within the domain\n\n # Choose a direction to change it\n dir = random.randint(-step, step) # random between up, down or stay same\n\n # Create a new list with one of the values changed\n vecb = vec[:]\n vecb[i] += dir # one of the elements in vec list changes\n if vecb[i] < domain[i][0]:\n vecb[i] = domain[i][0]\n elif vecb[i] > domain[i][1]:\n vecb[i] = domain[i][1]\n\n # Calculate the current cost and new cost\n ea = costf(vec) # cost of incumbent vec\n eb = costf(vecb) # cost of new vec\n p = pow(math.e, (-ea - eb) / T) # probability of higher cost solution being accepted\n # At high T, exponent becomes 0 and probability almost 1 -> willing to accept worse solution\n # As T gets lower, difference (high-low) becomes more important, p gets lower\n\n # Is it better, or does it make the probability cutoff?\n if (eb < ea or random.random() < p): # if new cost is lower or probability is high\n vec = vecb # adopt the new vec\n\n # Decrease the temperature\n T = T * cool\n\n return vec",
"def main():\n generation = 0\n best_accuracy = 0.0\n networks = [NeuralNetwork() for _ in range(population)]\n best_weights = []\n best_biases = []\n\n \"\"\"Main genetic loop\"\"\"\n while best_accuracy < 0.9 and generation < 100:\n generation += 1\n print(\"========== Generation number \", generation, \" ==========\")\n\n \"\"\"Fitness in genetic - nn accuracy and choice of best one\"\"\"\n for nn in networks:\n current_accuracy = nn.calculate_accuracy(x_train.T, y_train)\n if current_accuracy > best_accuracy:\n best_accuracy = current_accuracy\n print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Best Accuracy: ', best_accuracy)\n best_weights.clear()\n best_biases.clear()\n for layer in nn.layers:\n best_weights.append(layer.weights)\n best_biases.append(layer.biases)\n\n \"\"\"Sort networks by fitness function value\"\"\"\n networks = sorted(networks, key=lambda z: z.accuracy, reverse=True)\n print(networks[0].layers[0].weights)\n\n \"\"\"Pick top individuals and make crossovers also with some chance to mutation\"\"\"\n new_generation = []\n for i in range(top_pick):\n for j in range(population//top_pick):\n nn1 = copy.deepcopy(networks[i])\n nn2 = copy.deepcopy(networks[random.randint(0, top_pick)])\n locus = random.randint(0, 1)\n for idx, layer in enumerate(nn1.layers):\n for index, neuron in enumerate(layer.weights):\n tmp = neuron[locus]\n neuron[locus] = nn2.layers[idx].weights[index][locus]\n nn2.layers[idx].weights[index][locus] = tmp\n if random.randint(0, 100) < mutation_chance:\n # print(\"MUTATION!\")\n # layer.weights[locus] = np.negative(layer.weights[locus])\n # layer.weights[locus] = np.random.randn(np.size(layer.weights[locus]))\n neuron[locus] = np.random.randn()\n new_generation.append(nn1)\n new_generation.append(nn2)\n networks.clear()\n networks = copy.deepcopy(new_generation)\n\n print(\"Selection accuracy: \")\n print(best_accuracy)\n\n \"\"\"Create new network and set start weights and biases to the best from genetic\"\"\"\n genetic_nn = NeuralNetwork()\n for idx, layer in enumerate(genetic_nn.layers):\n layer.weights = best_weights[idx]\n layer.biases = best_biases[idx]\n genetic_nn.train(x_train, y_train, 10, 10)\n genetic_nn.calculate_accuracy(x_train.T, y_train)\n\n print(\"Prediction accuracy: \")\n print(genetic_nn.accuracy)",
"def run_optimizer():\n\n # Build the model\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n prob.model.add_subsystem('myfunc', objective_function())\n\n # Optimizer\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'COBYLA'#'SLSQP'\n\n # Variables\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n\n # Output, Connections and Design variables\n indeps.add_output(key, listval[0])\n prob.model.connect('indeps.'+key, 'myfunc.'+key)\n prob.model.add_design_var('indeps.'+key, lower=minval, upper=maxval)\n\n\n # Objective function\n prob.model.add_objective('myfunc.f_xy')\n\n #passnb = 440\n # define the component whose output will be constrained\n prob.model.add_subsystem('const', constraint())\n prob.model.add_constraint('const.passengers', upper=450, lower=440)\n\n # Run\n prob.setup()\n prob.run_driver()\n\n\n # Results (TODO: improve)\n log.info('=========================================')\n log.info('min = ' + str(prob['myfunc.f_xy']))\n \n iterations = arange(0,follower[\"Counter\"])\n\n plot(iterations, follower[\"optimVar\"])\n show()\n\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' = ' + str(prob['indeps.'+key]))\n\n log.info('Variable history')\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' => ' + str(listval))\n\n log.info('=========================================')",
"def main():\n\n # SETUP OBJECTIVE FUNCTION ############################\n\n if objective_func == \"sphere\":\n # Sphere ##########################\n from ailib.optimization.function.sphere import Function\n #f = Function(1)\n f = Function(2)\n #f = Function(10)\n\n elif objective_func == \"noised_sphere\":\n # Noised sphere ###################\n from ailib.optimization.function.noised_sphere import Function\n #f = Function(1)\n f = Function(2)\n\n elif objective_func == \"sin1\":\n # Sinusoid functions ##############\n from ailib.optimization.function.sin1 import Function\n f = Function()\n\n elif objective_func == \"sin2\":\n # Sinusoid functions ##############\n from ailib.optimization.function.sin2 import Function\n f = Function()\n\n elif objective_func == \"sin3\":\n # Sinusoid functions ##############\n from ailib.optimization.function.sin3 import Function\n f = Function()\n\n elif objective_func == \"yahoo\":\n # Yahoo function ##################\n from ailib.optimization.function.yahoo import Function\n f = Function()\n\n elif objective_func == \"deg_2_poly\":\n # Degree 2 polynomial function ####\n from ailib.optimization.function.degree_2_polynomial import Function\n f = Function(np.array([6.,2.]), np.array([1.,2.]), 1., 2)\n\n else:\n raise Exception(\"Wrong objective_func value.\")\n\n # Plot ########\n #f.plot()\n\n\n # OPTIMIZER ###########################################\n\n if optimizer_choice == \"naive\":\n # Naive Minimizer #################\n from ailib.optimization.optimizer.naive import Optimizer\n optimizer = Optimizer()\n best_x = optimizer.optimize(f, num_samples=300)\n\n elif optimizer_choice == \"gradient\":\n # Gradient descent ################\n from ailib.optimization.optimizer.gradient import Optimizer\n optimizer = Optimizer()\n f.delta = 0.01\n best_x = optimizer.optimize(f, num_iterations=30)\n\n elif optimizer_choice == \"saes\":\n # SAES ############################\n from ailib.optimization.optimizer.saes_hgb import Optimizer\n optimizer = Optimizer(x_init=np.ones(f.ndim), num_evals_func=lambda gen_index: math.floor(10. * pow(gen_index, 0.5)))\n optimizer = Optimizer(x_init=np.ones(f.ndim))\n best_x = optimizer.optimize(f, num_gen=50)\n\n elif optimizer_choice == \"cutting_plane\":\n # Cutting plane ###################\n from ailib.optimization.optimizer.cutting_plane import Optimizer\n optimizer = Optimizer()\n\n #best_x = optimizer.optimize(f, num_iterations=7) # sphere with 1 dimension\n #best_x = optimizer.optimize(f, num_iterations=15) # sphere with 2 dimensions\n #best_x = optimizer.optimize(f, num_iterations=100) # sphere with 10 dimensions\n\n #best_x = optimizer.optimize(f, parallel=\"linear\", num_iterations=7) # sphere with 1 dimension\n #best_x = optimizer.optimize(f, parallel=\"linear\", num_iterations=100) # sphere with 10 dimension\n\n #best_x = optimizer.optimize(f, parallel=\"gaussian\", num_iterations=7) # sphere with 1 dimension\n #best_x = optimizer.optimize(f, parallel=\"gaussian\", num_iterations=100) # sphere with 10 dimension\n\n best_x = optimizer.optimize(f, num_iterations=15) # sphere with 2 dimensions\n\n elif optimizer_choice == \"eda\":\n # EDA #############################\n #from ailib.optimization.optimizer.eda import Optimizer\n pass\n\n else:\n raise Exception(\"Wrong optimizer_choice value.\")\n\n print(\"Best sample: f(\", best_x, \") = \", f(best_x))",
"def bayesian_optimization(self, objective: evalset.test_funcs.TestFunction) -> Tuple[np.ndarray, np.ndarray]:\n \n # SET UP THE GP MODEL #\n bounds = objective.bounds\n dim = len(bounds)\n \n lik = GPy.likelihoods.Gaussian()\n lik.variance.constrain_fixed(self.noise**2, warning=False)\n noise = self.noise\n\n X0 = np.empty((0,dim))\n y = []\n yc = []\n \n def objective_modifier(x, f=None, batch_size=1):\n return np.concatenate(tuple( f(x[:,i*batch_size:(i+1)*batch_size]).reshape((-1,1)) for i in range(x.shape[1]//batch_size)), axis=1)\n \n \n # Initial observations:\n if self.use_comparative_observations_in_init:\n if self.random:\n X0 = util.random_sample(bounds, 2**dim)\n else:\n X0 = util.grid_sample(dim)\n yc = util.give_comparisons(objective.f, X0)\n if self.use_direct_observations_in_init:\n if self.random:\n Xn = util.random_sample(bounds, 2**dim)\n else:\n Xn = util.grid_sample(dim)\n yn = objective.f(Xn).reshape((-1,1))\n y = [(X0.shape[0] + i, yi) for i,yi in enumerate(yn)]\n X0 = np.concatenate((X0, Xn), axis=0)\n \n \n if not self.use_comparative_observations_in_init and not self.use_direct_observations_in_init:\n m = self.inference(util.static_sample(bounds), [(i, yi) for i,yi in enumerate(np.array([[0], [0]]))], yc, self.kernel.copy(), lik, get_logger=self.get_logger)\n else:\n m = self.inference(X0, y, yc, self.kernel.copy(), lik, get_logger=self.get_logger)\n\n # CREATE BO LOOP #\n bo_loop = create_bayesian_optimization_loop(m, bounds, self.batch_size, self.acquisition)\n \n # RUN THE LOOP #\n bo_loop.run_loop( partial(objective_modifier, f=objective.f, batch_size=self.batch_size), self.iterations)\n return m.X, m.yc",
"def AdaptiveSolve(self):\n pass",
"def run_snn_trial_1(images,\r\n F_weights,\r\n omega,\r\n thresholds,\r\n dt,\r\n leak,\r\n mu=0.,\r\n sigma_v=0.\r\n ):\r\n\r\n # initialize system\r\n N = F_weights.shape[0] # number of neurons\r\n num_bins = images.shape[2] # number of time bins\r\n firing_rates = np.zeros((N, num_bins))\r\n V_membrane = np.zeros(N)\r\n #print('init')\r\n\r\n\r\n # implement the Euler method to solve the differential equations\r\n for t in range(num_bins - 1):\r\n # compute command signal\r\n command_x = (images[:, :, t + 1] -\r\n images[:, :, t]) / dt + leak * images[:, :, t]\r\n\r\n #print(np.tensordot(F_weights, command_x, ([1,2],[0,1])))\r\n #print(-leak * V_membrane)\r\n #print(np.tensordot(F_weights, command_x, ([1,2],[0,1]))-leak * V_membrane)\r\n # update membrane potential\r\n V_membrane += dt * (-leak * V_membrane +\r\n np.tensordot(F_weights, command_x, ([1,2],[0,1]))\r\n ) + np.sqrt(2 * dt * leak) * sigma_v * np.random.randn(N)\r\n\r\n # update firing rates\r\n firing_rates[:, t + 1] = (1 - leak * dt) * firing_rates[:, t]\r\n\r\n # Check if any neurons are past their threshold during the last time-step\r\n diff_voltage_thresh = V_membrane - thresholds\r\n spiking_neurons_indices = np.arange(N)[diff_voltage_thresh >= 0]\r\n if spiking_neurons_indices.size > 0:\r\n # Pick the neuron which likely would have spiked first, by max distance from threshold\r\n to_pick = np.argmax(V_membrane[spiking_neurons_indices] - thresholds[spiking_neurons_indices])\r\n s = spiking_neurons_indices[to_pick]\r\n\r\n # Update membrane potential\r\n V_membrane[s] -= mu\r\n V_membrane += omega[:, s]\r\n\r\n # Update firing rates\r\n firing_rates[s, t + 1] += 1\r\n\r\n else:\r\n pass\r\n\r\n return firing_rates",
"def _optimize(self):\n raise NotImplementedError()",
"def optimize(modules):\n\n global module_optim\n module_optim = modules\n run_optimizer()",
"def anova_oneway_simulation(data, variables, effect_size, sample_size, alpha=0.05, n_repeats=15, weight_values=None,\n weight_threshold=0.8, modification_type='correlation', class_balance=0.5,\n multiple_testing_correction='fdr_by'):\n\n try:\n import warnings\n warnings.filterwarnings('ignore')\n if modification_type not in ['correlation', 'manual', 'proportion', 'correlation_weighted']:\n raise ValueError(\"modification_type argument not supported\")\n if modification_type == 'proportion' and not isinstance(variables, float):\n raise TypeError(\"When using \\'proportion\\' as modification_type \\'variables\\' must be a float\")\n\n # get the list of metrics calculated in scoreResults and update\n results = dict.fromkeys(score_metrics)\n for key in results.keys():\n results[key] = np.zeros((effect_size.size, sample_size.size, n_repeats))\n\n if multiple_testing_correction is not None:\n adjusted_results = dict.fromkeys(score_metrics)\n for key in adjusted_results.keys():\n adjusted_results[key] = np.zeros((effect_size.size, sample_size.size, n_repeats))\n adjusted_results['method'] = multiple_testing_correction\n\n n_vars = data.shape[1]\n # Loop over effect size, sample size and finally each monte carlo repeat\n for eff_idx, curr_effect in np.ndenumerate(effect_size):\n for ssize_idx, curr_ssize in np.ndenumerate(sample_size):\n for rep_idx in range(n_repeats):\n # Select samples to use\n ## Select a subset of the simulated spectra\n mod_data = np.copy(data[np.random.choice(data.shape[0], curr_ssize, replace=False), :])\n # if any option other than proportion\n if modification_type != 'proportion':\n # Modify only variables above a certain threshold of correlation\n var_to_mod = np.zeros(n_vars, dtype='int')\n var_to_mod[variables] = 1\n\n expected_hits = np.zeros(n_vars, dtype='int')\n expected_hits[var_to_mod == 1] = 1\n # If correlation and correlation_weighted\n if weight_values is not None and modification_type in [\"correlation\", \"correlation_weighted\"]:\n if weight_values.ndim == 1:\n var_to_mod |= abs(weight_values) >= weight_threshold\n else:\n var_to_mod |= np.any(abs(weight_values) >= weight_threshold, axis=1)\n\n expected_hits = var_to_mod\n # Select a subset of samples to add the effect on\n which_samples = np.random.choice(range(curr_ssize), int(np.floor(class_balance * curr_ssize)),\n replace=False)\n\n if modification_type == 'correlation_weighted':\n mod_data = effect_cohen_d(mod_data, curr_effect, which_vars=var_to_mod,\n which_samples=which_samples, standardized=True,\n noise=0, weight=weight_values)\n else:\n mod_data = effect_cohen_d(mod_data, curr_effect, which_vars=var_to_mod,\n which_samples=which_samples, standardized=True,\n noise=0, weight=None)\n\n # Would it be possible to pass a model selection criteria?\n # P-values for the one-way ANOVA\n pvals = scistats.f_oneway(np.delete(mod_data, which_samples, axis=0),\n mod_data[which_samples, :])[1]\n\n if modification_type == 'correlation_weighted':\n scored_res = score_confusionmetrics(result_vector=pvals, expected_hits=expected_hits,\n weight_vector=weight_values,\n alpha=alpha)\n else:\n scored_res = score_confusionmetrics(result_vector=pvals, expected_hits=expected_hits,\n weight_vector=None,\n alpha=alpha)\n\n for key in scored_res.keys():\n results[key][eff_idx, ssize_idx, rep_idx] = scored_res[key]\n # Would it be possible to pass a model selection criteria?\n # P-values for the one-way ANOVA\n if multiple_testing_correction is not None:\n adjusted_pvalues = multipletests(pvals, alpha=0.05, 
method=multiple_testing_correction)[1]\n\n scored_res = score_confusionmetrics(result_vector=adjusted_pvalues, expected_hits=expected_hits,\n weight_vector=None,\n alpha=alpha)\n for key in scored_res.keys():\n adjusted_results[key][eff_idx, ssize_idx, rep_idx] = scored_res[key]\n\n results['Sample Size'] = sample_size\n results['Effect Size'] = effect_size\n\n if multiple_testing_correction is not None:\n adjusted_results['Sample Size'] = sample_size\n adjusted_results['Effect Size'] = effect_size\n\n # process the results...\n if multiple_testing_correction is None:\n return results\n else:\n return results, adjusted_results\n\n except TypeError as terp:\n raise terp\n except ValueError as verr:\n raise verr\n except Exception as exp:\n raise exp",
"def __call__(self, individual):\n num_params = individual.get_number_local_optimization_params()\n c_0 = np.random.uniform(*self.options[\"param_init_bounds\"], num_params)\n params = self._run_method_for_optimization(\n self._sub_routine_for_obj_fn, individual, c_0)\n individual.set_local_optimization_params(params)",
"def setup_optimizer(self):\n # The statistical model of our objective function\n init_param_point = dict(zip(self.hyper_param_names,\n self.init_hyper_param))\n init_param_point.update(self.fixed_params_dict)\n init_train_error, init_test_error, init_exec_time = \\\n self.get_obj(init_param_point)\n init_X = np.expand_dims(np.array(self.init_hyper_param), axis=0)\n self.best_obj = init_test_error\n self.train_erro_gp = GPy.models.GPRegression(init_X,\n init_train_error,\n self.train_error_kernel,\n noise_var=\n self.noise_level ** 2)\n self.train_erro_gp.optimize()\n\n self.test_erro_gp = GPy.models.GPRegression(init_X,\n init_test_error,\n self.test_error_kernel,\n noise_var=\n self.noise_level ** 2)\n self.test_erro_gp.optimize()\n\n self.exec_time_gp = GPy.models.GPRegression(init_X,\n init_exec_time,\n self.exec_time_kernel,\n noise_var=\n self.noise_level ** 2)\n self.exec_time_gp.optimize()",
"async def run_minimization(\n func,\n *,\n initial_simplex,\n fake_potential=None,\n nelder_mead_kwargs=MappingProxyType({})\n):\n if fake_potential is not None:\n # TODO: Check if deepcopying fake potential is valid / better.\n # possible issue when not deep-copying is that the fake potential may\n # change during minimization, and the Nelder-Mead algorithm could get\n # horribly stuck when the current best value is within the 'infinite'\n # region.\n modified_kwargs = dict(nelder_mead_kwargs)\n modified_kwargs['ftol'] = float('inf')\n res_fake = await root_nelder_mead(\n func=add_fake_potential(fake_potential, func),\n initial_simplex=initial_simplex,\n **modified_kwargs\n )\n simplex_final = res_fake.simplex_history[-1]\n simplex_blowup = simplex_final[\n 0] + 1.5 * (simplex_final - simplex_final[0])\n\n res = await root_nelder_mead(\n func=func, initial_simplex=simplex_blowup, **nelder_mead_kwargs\n )\n\n return JoinedMinimizationResult(child=res, ancestor=res_fake)\n else:\n return await root_nelder_mead(\n func=func, initial_simplex=initial_simplex, **nelder_mead_kwargs\n )",
"def add_minimize(self, co, var):",
"def test_optimize():\n # Setup the tests environment:\n artifact_path = _setup_environment()\n\n # Create the function parsing this notebook's code using 'code_to_function':\n log_model_function = mlrun.code_to_function(\n filename=\"test_onnx_utils.py\",\n name=\"log_model\",\n kind=\"job\",\n image=\"mlrun/ml-models\",\n )\n\n # Run the function to log the model:\n log_model_run = log_model_function.run(\n handler=\"_log_onnx_model\",\n artifact_path=artifact_path,\n params={\"model_name\": MODEL_NAME},\n local=True,\n )\n\n # Import the ONNX Utils function:\n onnx_function = mlrun.import_function(\"function.yaml\")\n\n # Run the function to optimize our model:\n onnx_function_run = onnx_function.run(\n handler=\"optimize\",\n artifact_path=artifact_path,\n params={\n \"model_path\": log_model_run.outputs[\n \"model\"\n ], # <- Take the logged model from the previous function.\n \"optimized_model_name\": OPTIMIZED_ONNX_MODEL_NAME,\n },\n local=True,\n )\n\n # Cleanup the tests environment:\n _cleanup_environment(artifact_path=artifact_path)\n\n # Print the outputs list:\n print(f\"Produced outputs: {onnx_function_run.outputs}\")\n\n # Verify the '.onnx' model was created:\n assert \"model\" in onnx_function_run.outputs",
"def trained_optimizer():\n options = {\"c1\": 0.5, \"c2\": 0.3, \"w\": 0.9}\n optimizer = GlobalBestPSO(n_particles=10, dimensions=2, options=options)\n optimizer.optimize(sphere, iters=100)\n return optimizer",
"def runOneIteration(self):\n deltas = [2 * np.random.rand(*self.policy.shape) -\n 1 for i in range(self.agent_param.N)]\n rewards = []\n for i in range(self.agent_param.N):\n policy_1 = self.policy + self.agent_param.nu * deltas[i]\n policy_2 = self.policy - self.agent_param.nu * deltas[i]\n\n # Safe ARS - Safe exploration\n do_real_rollout = True\n if self.agent_param.safe:\n simulator = Environment(self.estimated_param)\n reward_1, _ = simulator.rollout(policy_1, covariance=self.covariance,\n mean=self.mean)\n if reward_1 <= self.sim_threshold:\n do_real_rollout = False\n else:\n reward_2, _ = simulator.rollout(policy_2,\n covariance=self.covariance,\n mean=self.mean)\n if reward_2 <= self.sim_threshold:\n do_real_rollout = False\n\n if do_real_rollout:\n # TODO: MODIFY HERE FOR PARALLEL IMPLEMENTATION\n for policy in [policy_1, policy_2]:\n reward, saved_states = \\\n self.real_world.rollout(policy, covariance=self.covariance,\n mean=self.mean)\n if self.agent_param.safe and reward < self.agent_param.threshold:\n print(f\"Obtained in real world rollout a \"\n f\"return of {reward}, below the \"\n f\"threshold {self.agent_param.threshold}\")\n rewards.append(reward)\n if not self.agent_param.V1:\n self.saved_states += saved_states\n self.database.add_trajectory(saved_states, policy)\n\n if len(rewards) > 0:\n # print(rewards)\n order = self.sort_directions(deltas, rewards)\n self.update_policy(deltas, rewards, order)\n\n if self.agent_param.V1 is False:\n states_array = np.array(self.saved_states)\n self.mean = np.mean(states_array, axis=0)\n self.covariance = np.cov(states_array.T)\n # print(f\"mean = {self.mean}\")\n # print(f\"cov = {self.covariance}\")\n return rewards"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add shared Hermes/MQTT commandline arguments. These are useful arguments for every Hermes client, concerning the connection, authentication, site IDs, debugging and logging.
|
def add_hermes_args(parser: argparse.ArgumentParser):
parser.add_argument(
"--host", default="localhost", help="MQTT host (default: localhost)"
)
parser.add_argument(
"--port", type=int, default=1883, help="MQTT port (default: 1883)"
)
parser.add_argument("--username", help="MQTT username")
parser.add_argument("--password", help="MQTT password")
parser.add_argument("--tls", action="store_true", help="Enable MQTT TLS")
parser.add_argument(
"--tls-ca-certs", help="MQTT TLS Certificate Authority certificate files"
)
parser.add_argument("--tls-certfile", help="MQTT TLS client certificate file (PEM)")
parser.add_argument("--tls-keyfile", help="MQTT TLS client key file (PEM)")
parser.add_argument(
"--tls-cert-reqs",
default="CERT_REQUIRED",
choices=["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"],
help="MQTT TLS certificate requirements for broker (default: CERT_REQUIRED)",
)
parser.add_argument(
"--tls-version", type=int, help="MQTT TLS version (default: highest)"
)
parser.add_argument("--tls-ciphers", help="MQTT TLS ciphers to use")
parser.add_argument(
"--site-id",
action="append",
help="Hermes site id(s) to listen for (default: all)",
)
parser.add_argument(
"--debug", action="store_true", help="Print DEBUG messages to the console"
)
parser.add_argument(
"--log-format",
default="[%(levelname)s:%(asctime)s] %(name)s: %(message)s",
help="Python logger format",
)
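    # Usage sketch (hypothetical client script, not part of this function): build a parser,
    # add the shared Hermes arguments, then parse and configure logging. Assumes the standard
    # library logging module is imported by the caller.
    #
    #     parser = argparse.ArgumentParser(prog="my-hermes-client")
    #     add_hermes_args(parser)
    #     args = parser.parse_args()
    #     logging.basicConfig(
    #         level=logging.DEBUG if args.debug else logging.INFO,
    #         format=args.log_format,
    #     )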
|
[
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n self.createArgument('--stop', self.stop, 1, 'Stop previous job', action='store_true')\n self.createArgument('--debug', self.debug, 1, 'Debugging mode', action='store_true')\n self.createArgument('--socket', self.setSocket, '', 'use TCP or UDP connection over ethernet/wireless, default TCP, available TCP, UDP, RFC (bluetooth)')\n self.createArgument('--client', self.client, 1, 'Connect to comma separated client addresses')\n self.createArgument('--server', self.bindMode, 1, 'turn into a server mode that handles instructions', action='store_true')\n self.createArgument('--target', self.selectTarget, '', 'target adress (bluetooth mac or ip adress over ethernet/wireless)')\n self.createArgument('--port', self.selectPort, 80, 'destination port')\n self.createArgument('--bytes', self.packetSize, 80, 'number of bytes to send in one packet')",
"def addCommonArguments(self):\n pass",
"def test_add_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n args = parser.parse_args(['--version', '-v'])\n self.assertTrue(args.version)\n self.assertTrue(args.verbose)",
"def add_custom_cli_args(self, cli_parser):\n pass",
"def _cli_extras(self):\n kwargs = self.kwargs or {}\n extras = [\n \"--silent\",\n ]\n for k, v in kwargs.items():\n extras.append(\"--\" + k.replace(\"_\", \"-\"))\n extras.append(str(v))\n\n # For the high/low memory test cases of NTN, SE, etc.\n if self.training_loop_kwargs and \"automatic_memory_optimization\" in self.training_loop_kwargs:\n automatic_memory_optimization = self.training_loop_kwargs.get(\"automatic_memory_optimization\")\n if automatic_memory_optimization is True:\n extras.append(\"--automatic-memory-optimization\")\n elif automatic_memory_optimization is False:\n extras.append(\"--no-automatic-memory-optimization\")\n # else, leave to default\n\n extras += [\n \"--number-epochs\",\n self.train_num_epochs,\n \"--embedding-dim\",\n self.embedding_dim,\n \"--batch-size\",\n self.train_batch_size,\n ]\n extras.extend(self.cli_extras)\n\n # Make sure that inverse triples are created if create_inverse_triples=True\n if self.create_inverse_triples:\n extras.append(\"--create-inverse-triples\")\n\n extras = [str(e) for e in extras]\n return extras",
"def add_arguments(self, parser):\n parser.add_argument(\n \"--datetime\",\n action=\"store\",\n help=\"ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.\",\n )\n parser.add_argument(\n \"--global_userinfo\",\n action=\"store\",\n help=\"specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.\",\n )",
"def add_standard_args(self):\n self.add_argument(\"-v\", \"--verbose\",\n help=\"Set log verbosity to True, nominal debug level.\", action=\"store_true\")\n self.add_argument(\"--verbosity\",\n help=\"Set log verbosity to a specific level: 0..100.\", type=int, default=0)\n self.add_argument(\"--dump-cmdline\", action=\"store_true\",\n help=\"Dump the command line parameters used to start the script to the log.\")\n self.add_argument(\"-R\", \"--readonly-cache\", action=\"store_true\",\n help=\"Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.\")\n self.add_argument('-I', '--ignore-cache', action='store_true', dest=\"ignore_cache\",\n help=\"Download required files even if they're already in the cache.\")\n self.add_argument(\"-V\", \"--version\",\n help=\"Print the software version and exit.\", action=\"store_true\")\n self.add_argument(\"-J\", \"--jwst\", dest=\"jwst\", action=\"store_true\",\n help=\"Force observatory to JWST for determining header conventions.\"\"\")\n self.add_argument(\"-H\", \"--hst\", dest=\"hst\", action=\"store_true\",\n help=\"Force observatory to HST for determining header conventions.\"\"\")\n self.add_argument(\"--roman\", dest=\"roman\", action=\"store_true\",\n help=\"Force observatory to Roman for determining header conventions.\"\"\")\n self.add_argument(\"--stats\", action=\"store_true\",\n help=\"Track and print timing statistics.\")\n self.add_argument(\"--profile\",\n help=\"Output profile stats to the specified file.\", type=str, default=\"\")\n self.add_argument(\"--log-time\", action=\"store_true\",\n help=\"Add date/time to log messages.\")\n self.add_argument(\"--pdb\",\n help=\"Run under pdb.\", action=\"store_true\")\n self.add_argument(\"--debug-traps\",\n help=\"Bypass exception error message traps and re-raise exception.\", action=\"store_true\")",
"def add_shared_arguments_for_recipes(parser: argparse.ArgumentParser) -> None:\n config = load_config_file()\n default_save_path = config.get('save_path', 'value', vars=os.environ)\n\n parser.add_argument('ref_data', nargs='?', default=None, type=valid_existing_path,\n help='Filepath to the reference data folder')\n parser.add_argument('--start_yr', default=\"1958\", type=valid_year_string,\n help='Initial year cutoff. Default is 1958, which is the first year of the Mauna Loa CO2 record.')\n parser.add_argument('--end_yr', default=\"2014\", type=valid_year_string,\n help='Final year cutoff. Default is 2014, which is the final year for CMIP6 historical runs.')\n parser.add_argument('--figure_savepath', default=default_save_path,\n type=valid_writable_path, help='Filepath for saving generated figures')",
"def AddCreateCommonArgs(parser):\n AddAdminEnabled(parser)\n AddDescription(parser)\n AddCustomerName(parser)\n AddLinkType(parser)\n AddNocContactEmail(parser)\n AddRequestedLinkCount(parser)",
"def add_cli_arguments(self, parser):\n super(Application, self).add_cli_arguments(parser)\n\n add_kafka_manager_api_cli_arguments(parser)",
"def setup_args():\n parser = ParlaiParser(False, False)\n parser_grp = parser.add_argument_group('Browser Chat')\n parser_grp.add_argument(\n '--port', default=35496, type=int, help='Port used by the web socket (run.py)'\n )\n parser_grp.add_argument(\n '--host',\n default='0.0.0.0',\n type=str,\n help='Host from which allow requests, use 0.0.0.0 to allow all IPs',\n )\n parser_grp.add_argument(\n '--serving_port',\n default=8080,\n type=int,\n help='Port used to configure the server',\n )\n\n return parser.parse_args()",
"def add_cmdline_arg(args, arg, *values):\n if arg not in args:\n args = list(args) + [arg] + list(values)\n return args",
"def extra_start_args(self):\n\n\t\treturn self.tool_config.get('extra_start_args', default = '')",
"def add_arguments(self, parser):\r\n parser.add_argument(\"digcoll_retriever_host\",\r\n help=\"The host of the digcoll_retriever\"),\r\n parser.add_argument(\"project_api\",\r\n help=\"\", type=str)\r\n parser.add_argument(\"import_data_file\",\r\n help=\"An identifier for a particular MVol issue\", type=str)",
"def appendCommonOptions(parser):\r\n\t\r\n\t# Chip\r\n\t# parser.add_argument('--chip',\r\n\t# help = 'Target chip type',\r\n\t# choices = ['GAP8', 'GAP8_V2'],\r\n\t# default = os.environ.get('TARGET_CHIP', ''))\r\n\t\r\n\t# Port\r\n\t# parser.add_argument(\r\n\t# \t'--port', '-p',\r\n\t# \thelp='Serial port device',\r\n\t# \tdefault=os.environ.get('ESPTOOL_PORT', None))\r\n\t\r\n\t# Baud rate\r\n\t# parser.add_argument(\r\n\t# \t'--baud', '-b',\r\n\t# \thelp='Serial port baud rate used when flashing/reading',\r\n\t# \ttype=arg_auto_int,\r\n\t# \tdefault=os.environ.get('ESPTOOL_BAUD', ESPLoader.ESP_ROM_BAUD))\r\n\t\r\n\t# Quiet\r\n\tparser.add_argument('-q',\r\n\t action = 'store_false',\r\n\t dest = 'verbose',\r\n\t help = 'Quiet mode, print only the critical messages.')",
"def add_simple_args(self):\n self.ctrl_parser.add_argument(\"-V\", \"--version\", action=\"version\", version='0.1.0',\n help='Provides the version of the tool')\n self.ctrl_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", help=\"increase output verbosity\")\n self.ctrl_parser.add_argument(\"-i\", action=InteractiveCli, nargs=0, help=\"Start in interactive mode\")\n self.ctrl_parser.add_argument(\"-t\", \"--timeout\", type=float,\n help=\"Provides a timeout for the command\")",
"def _add_arguments(self):\n #the base arguments\n self.parser.add_argument(\"-d\", \"--debug\",\n help=\"Display debugging messages.\",\n action=\"store_true\",\n default=False, dest=\"debug\")\n \n self.parser.add_argument(\"--pudb\",\n help=\"Enable pudb interactive debugging.\",\n action=\"store_true\",\n default=False, dest='pudb')\n\n self.parser.add_argument(\"--pdb\",\n help=\"Enable python's debugger\",\n action=\"store_true\",\n default=False, dest='pdb')\n \n\n self.parser.add_argument(\"-s\", \"--silent\",\n help=\"Turn off screen output.\",\n action=\"store_true\", default=False,\n dest='silent')\n return",
"def record_argv(args):\n ssh_env = _ssh_env_vars(os.environ)\n parts = sys.argv[:]\n parts += [\"{}={}\".format(k, v) for k, v in ssh_env.items()]\n line = \" \".join(parts)\n extra = _add_extra_ssh_data(ssh_env, args)\n extra = _add_logging_extra(extra)\n logging.getLogger(_audit_logger_name).warning(line, extra=extra)",
"def getCommonArgs(description=\"\"):\n return getCommonArgsParser(description).parse_args()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Connect to an MQTT broker with supplied arguments.
|
def connect(client: mqtt.Client, args: argparse.Namespace):
if args.username:
client.username_pw_set(args.username, args.password)
# TLS
if args.tls:
# TLS is enabled
if args.tls_version is None:
# Use highest TLS version
args.tls_version = ssl.PROTOCOL_TLS
if args.tls_ca_certs is not None:
args.tls_ca_certs = os.path.expandvars(args.tls_ca_certs)
if args.tls_certfile is not None:
args.tls_certfile = os.path.expandvars(args.tls_certfile)
if args.tls_keyfile is not None:
args.tls_keyfile = os.path.expandvars(args.tls_keyfile)
client.tls_set(
ca_certs=args.tls_ca_certs,
certfile=args.tls_certfile,
keyfile=args.tls_keyfile,
cert_reqs=getattr(ssl, args.tls_cert_reqs),
tls_version=args.tls_version,
ciphers=(args.tls_ciphers or None),
)
client.connect(args.host, args.port)
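    # Usage sketch (assumes a parser built with the add_hermes_args helper above and the
    # paho-mqtt client class):
    #
    #     client = mqtt.Client()
    #     connect(client, parser.parse_args())
    #     client.loop_forever()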
|
[
"def connect_mqtt(self):\n\n\t\tdef on_connect(client, userdata, flags, rc):\n\t\t\t\"\"\"\n\t\t\tThis method is the callback for a connection try.\n\t\t\t:param client: the client\n\t\t\t:param userdata: the submitted userdata\n\t\t\t:param flags: the submitted connection flags\n\t\t\t:param rc: the response code\n\t\t\t\"\"\"\n\t\t\tif rc == 0:\n\t\t\t\tprint(f\"[{self.game}]: Connected to MQTT Broker!\")\n\t\t\telse:\n\t\t\t\tprint(f\"[{self.game}]: Failed to connect, return code %d\\n\", rc)\n\n\t\tclient = mqtt_client.Client(self.client_id)\n\t\tclient.on_connect = on_connect\n\t\tclient.connect(self.broker, self.port)\n\t\treturn client",
"def connect(mqtt_conf):\n logger.info(\"Creating MQTT client.\")\n client = mqtt.Client()\n client.on_publish = on_publish\n\n username = mqtt_conf.get('USERNAME', '')\n password = mqtt_conf.get('PASSWORD', '')\n\n client.username_pw_set(username, password)\n logger.info(\"Connecting to MQTT server\")\n\n host = mqtt_conf.get('HOST', 'localhost')\n port = mqtt_conf.get('PORT', 1883)\n client.connect(host, port)\n return client",
"def create_MQTT_Client(args):\n\n usetls = args.use_tls\n\n if args.cacerts:\n usetls = True\n\n port = args.port \n if port is None:\n if usetls:\n port = 8883\n else:\n port = 1883\n\n mqttc = mqtt.Client(args.clientid,clean_session = not args.disable_clean_session)\n\n if usetls:\n if args.tls_version == \"tlsv1.2\":\n tlsVersion = ssl.PROTOCOL_TLSv1_2\n elif args.tls_version == \"tlsv1.1\":\n tlsVersion = ssl.PROTOCOL_TLSv1_1\n elif args.tls_version == \"tlsv1\":\n tlsVersion = ssl.PROTOCOL_TLSv1\n elif args.tls_version is None:\n tlsVersion = None\n else:\n print (\"Unknown TLS version - ignoring\")\n tlsVersion = None\n\n if not args.insecure:\n cert_required = ssl.CERT_REQUIRED\n else:\n cert_required = ssl.CERT_NONE\n \n mqttc.tls_set(ca_certs=args.cacerts, certfile=None, keyfile=None, cert_reqs=cert_required, tls_version=tlsVersion)\n\n if args.insecure:\n mqttc.tls_insecure_set(True)\n\n if args.username or args.password:\n mqttc.username_pw_set(args.username, args.password)\n\n mqttc.on_message = on_message\n mqttc.on_connect = on_connect\n mqttc.on_publish = on_publish\n mqttc.on_subscribe = on_subscribe\n mqttc.on_unsubscribe = on_unsubscribe\n\n if args.debug:\n mqttc.on_log = on_log\n\n print(\"Connecting to \"+args.host+\" port: \"+str(port))\n mqttc.connect(args.host, port, args.keepalive)\n\n return mqttc",
"def cli(ctx, server, port, tls, timeout, verbose, prefix, topic):\n ctx.ensure_object(dict)\n topic = get_topic(prefix, topic)\n ctx.obj[\"engine\"] = engine.MQTT(server, port, tls, topic, timeout, verbose)",
"def connect_mqtt():\n # Connect to the MQTT client\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def connect_and_subscribe():\n global client\n client = MQTTClient(machine_id, broker)\n client.set_callback(mqtt_callback)\n client.connect()\n print(\"Connected to {}\".format(broker))\n for topic in (b'config', b'set'):\n t = topic_name(topic)\n client.subscribe(t)\n print(\"Subscribed to {}\".format(t))",
"def connect(self):\n self._mqtt = IoTMQTT(self, self._wifi_manager, self._hostname, self._device_id, self._shared_access_key, self._token_expires, self._logger)\n self._mqtt.connect()",
"def connect_to_mqtt_to_talk_to_robot(self,\n mqtt_broker_ip_address=None,\n lego_robot_number=None):\n self.connect(\"msg4pc\", \"msg4ev3\",\n mqtt_broker_ip_address, lego_robot_number)",
"def connect_and_subscribe(sub_callback=None):\n with open(\"credentials.json\", \"r\") as f:\n credentials = ujson.load(f)\n \n try:\n from umqtt.robust import MQTTClient\n except ImportError as e:\n import upip\n upip.install('micropython-umqtt.simple')\n upip.install('micropython-umqtt.robust')\n from umqtt.robust import MQTTClient\n \n # Set Options for MQTT-Broker\n client = MQTTClient(ubinascii.hexlify(machine.unique_id()), credentials[\"mqtt\"][\"host\"], credentials[\"mqtt\"][\"port\"])\n # Set callback to handle Messages\n if sub_callback is not None:\n client.set_callback(sub_callback)\n # Connect\n client.connect(clean_session=False)\n for topic in credentials[\"mqtt\"][\"topics\"]:\n client.subscribe(topic)\n time.sleep(3)\n client.check_msg()\n return client",
"def connect_to_mqtt_to_talk_to_laptop(self,\n mqtt_broker_ip_address=None,\n lego_robot_number=None):\n self.connect(\"msg4ev3\", \"msg4pc\",\n mqtt_broker_ip_address, lego_robot_number)",
"def __connect_with_credentials(self):\n\t\tself.client_.username_pw_set(\"xgvutxaa\", \"9cMIpVoL4Ujj\")\n\t\tself.client_.connect('spectacular-pharmacist.cloudmqtt.com',1883,3600)",
"def initialize(self):\n self.logger.info(\"Initializing connection to the MQTT broker.\")\n self.client = AWSIoTMQTTClient(self.client_id)\n self.client.configureEndpoint(self.endpoint, portNumber=8883)\n self.client.configureCredentials(CAFilePath=self.root_ca, KeyPath=self.private_key,\n CertificatePath=self.client_certificate)\n self.client.configureConnectDisconnectTimeout(self.conn_disconnect_timeout)\n self.client.configureMQTTOperationTimeout(self.mqtt_oper_timeout)\n if self.client.connect():\n self.logger.info(\"Connected!\")",
"def __init_mqtt(self):\n\n def on_connect(client, userdata, flags, rc):\n \"\"\"Callback for when the connection is established with the mqtt broker\"\"\"\n try:\n logging.info('MQTT Paho Connected with result code ' + str(rc))\n self.flag_connected = True\n logging.info('Subscribing to invoke topic')\n client.subscribe(self.invoke_topic)\n client.subscribe(self.cloud_to_device_topic)\n\n\n except Exception as e:\n logging.warning(\"on_connect with result error %s\" % e)\n\n def on_message(client, userdata, msg):\n \"\"\"Callback for when a message is received by client\"\"\"\n logging.info('MQTT message arrived')\n print('MQTT message arrived')\n logging.debug('topic %s' % msg.topic)\n print('topic %s' % msg.topic)\n logging.debug('payload %s' % msg.payload)\n print('payload %s' % msg.payload)\n self.handle_mqtt_messages(msg.topic, msg.payload)\n\n def on_disconnect(client, userdata, rc):\n \"\"\"Callback for when the connection is lost\"\"\"\n self.flag_connected = False\n logging.info('MQTT Disconnected!!')\n\n self.paho_client_mqtt = mqtt.Client(client_id=self.device_id, protocol=self.broker_mqtt_protocol)\n self.paho_client_mqtt.on_connect = on_connect\n self.paho_client_mqtt.on_message = on_message\n self.paho_client_mqtt.on_disconnect = on_disconnect\n self.paho_client_mqtt.username_pw_set(username=self.username)\n self.paho_client_mqtt.tls_set(ca_certs=self.broker_mqtt_CACert,\n certfile=self.device_cert,\n keyfile=self.device_key,\n cert_reqs=ssl.CERT_REQUIRED,\n tls_version=ssl.PROTOCOL_TLSv1_2,\n ciphers=None)\n self.paho_client_mqtt.tls_insecure_set(True)",
"def run(self):\n if self._username and self._password:\n self.username_pw_set(self._username, self._password)\n self.connect_async(self._mqtt_ip, self._mqtt_port)\n self.loop_start()",
"def config(self, topic, host, username=None, password=None):\n\n self.topic = topic\n\n self.options = {'hostname': host}\n\n if username is not None and password is not None:\n logging.debug(\"connected to MQTT with authentication\")\n self.options['auth'] = {'username': username, 'password': password}\n else:\n logging.debug(\"connected to MQTT without authentication\")",
"def __init__(self, delegate=None):\n self.client = mqtt.Client()\n self.delegate = delegate\n self.subscription_topic_name = None\n self.publish_topic_name = None\n self.rose_broker = \"mosquitto.csse.rose-hulman.edu\"",
"def connect(username, pwd, brokerName):\n global channel, _parameters\n try:\n _parameters = pika.URLParameters(\"amqps://{}:{}@{}/space-maker-vhost?heartbeat_interval=30&socket_timeout=1\".format(username, pwd, brokerName))\n connection = pika.BlockingConnection(_parameters)\n channel = connection.channel()\n return True\n except:\n logger.exception(\"broker connection failure\")\n return False",
"def start_mqtt():\n with app.app_context():\n sub = Subscriber()\n sub.subscribe()",
"def configure_client():\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n client.will_set('status/mqttc', payload=\"disconnected\", qos=1, retain=True)\n print('connecting')\n client.connect('broker', 1883, 60)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will resolve context processors from AppConfigs and add them to templates (list of backend configurations).
|
def add_required_context_processors(templates, installed_apps, option=None):
option = option or DEFAULT_CONTEXT_PROCESSORS_OPTION
processors = defaultdict(list)
for appc in ensure_app_configs(installed_apps):
required_cps = getattr(appc, option, None)
if not required_cps:
continue
        if isinstance(required_cps, str):
            required_cps = { DEFAULT_TEMPLATE_BACKEND: (required_cps,) }
        elif isinstance(required_cps, Iterable) and not hasattr(required_cps, 'items'):
            # note: str is Iterable (handled above); mappings already map backend -> processors
            required_cps = { DEFAULT_TEMPLATE_BACKEND: required_cps }
for backend, cps in required_cps.items():
processors[backend].extend(cps)
templates_map = OrderedDict((x.get('BACKEND'), x) for x in templates)
for backend, cps in processors.items():
conf = templates_map.get(backend)
if conf:
options = conf.setdefault('OPTIONS', {})
all_cps = chain(options.get('context_processors', ()), cps)
options['context_processors'] = tuple(unique(all_cps))
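    # Illustrative sketch (the attribute name below is hypothetical and must match the
    # `option` argument / DEFAULT_CONTEXT_PROCESSORS_OPTION): an AppConfig declaring
    # processors for a specific template backend.
    #
    #     class MyAppConfig(AppConfig):
    #         name = 'myapp'
    #         required_context_processors = {
    #             'django.template.backends.django.DjangoTemplates': (
    #                 'myapp.context_processors.site_info',
    #             ),
    #         }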
|
[
"def update_context_processors_from_apps(settings, processors_option=None):\n settings = SettingsDict.ensure(settings)\n installed_apps = settings.get('INSTALLED_APPS')\n templates = settings.get('TEMPLATES')\n if installed_apps and templates:\n add_required_context_processors(templates, installed_apps, option=processors_option)",
"def register_context_processors(app: Flask) -> None:\n app.context_processor(inject_get_alerts)\n app.context_processor(inject_get_hidden_alerts)\n app.context_processor(inject_a11y_url)",
"def _init_with_config(self):\n self.app_name = self.config.app_name\n\n if '.*' not in self.config.processors_tag_regex:\n self.config.processors_tag_regex.append('.*')\n self.processors = []\n for processor_tag_regex in self.config.processors_tag_regex:\n self.processors.append(\n Processor(processor_tag_regex))",
"def use_cache_template_loader_in_production(settings, cached_backends=None):\n # FIXME: this is done by Django from version 1.11 onwards, thus drop this at some point\n settings = SettingsDict.ensure(settings)\n debug = settings.get('DEBUG', False)\n templates = settings.get('TEMPLATES')\n cached_backends = cached_backends or DEFAULT_CACHED_BACKENDS\n\n if not templates or debug:\n return\n\n for conf in templates:\n if conf['BACKEND'] in cached_backends:\n options = conf.setdefault('OPTIONS', {})\n loaders = options.get('loaders')\n if not loaders or DEFAULT_CACHED_LOADER not in flatten_loaders(loaders):\n if not loaders:\n loaders = (DEFAULT_LOADER,)\n if conf.get('APP_DIRS', False):\n loaders += (DEFAULT_APP_LOADER,)\n loaders = ((DEFAULT_CACHED_LOADER, loaders),)\n options['loaders'] = loaders\n conf.pop('APP_DIRS')",
"def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'model': models\n }\n\n app.shell_context_processor(shell_context)",
"def configure_app_routes(config):\n\n # The app_route_prefixes dictionary for overriding app route prefixes\n app_route_prefixes = {\n #'blog': '/myblog'\n }\n\n for app_name in enabled_apps:\n app_route_prefix = app_route_prefixes.get(app_name, '/%s' % app_name)\n app_module = importlib.import_module(\".apps.%s\" % app_name, \"mitteilender\")\n\n try:\n config.include(app_module.application_routes, route_prefix=app_route_prefix)\n except Exception, e:\n print(repr(e))",
"def app_template_paths(subdir=None):\n # Note: keep in sync with: Blender's BKE_appdir_app_template_any\n\n subdir_tuple = (subdir,) if subdir is not None else ()\n\n # Avoid adding 'bl_app_templates_system' twice.\n # Either we have a portable build or an installed system build.\n for resource_type, module_name in (\n ('USER', \"bl_app_templates_user\"),\n ('LOCAL', \"bl_app_templates_system\"),\n ('SYSTEM', \"bl_app_templates_system\"),\n ):\n path = resource_path(resource_type)\n if path:\n path = _os.path.join(\n *(path, \"scripts\", \"startup\", module_name, *subdir_tuple))\n if _os.path.isdir(path):\n yield path\n # Only load LOCAL or SYSTEM (never both).\n if resource_type == 'LOCAL':\n break",
"def get_context(self, context, with_labels, admin_site):\n site = get_admin_site(admin_site)\n\n if context_passes_test(context) and site is not None:\n modeladmins = get_registered_modeladmins(context['request'], site)\n context.update({\n 'should_display_toolbar': True,\n 'should_display_apps': with_labels,\n 'app_list': _resort_modeladmins(modeladmins),\n })\n return context",
"def _create_template_config(self, config):\n pass",
"def webpack_config(context: Context):\n context.write_template('webpack.config.js')",
"def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)",
"def determine_contexts(self):\n return []",
"def resolve_contexts():\n contexts = ix.api.OfContextSet()\n ix.application.get_factory().get_root().resolve_all_contexts(contexts)\n with disabled_app():\n resolve(contexts[0])",
"def add_dynamic_context(self, plugin):\n self._add_plugin(plugin, self.context_switchers)",
"def get_template_overlay_context():\n context = {}\n contexts = [\n deployment_env.get_deployment_context(),\n ]\n try:\n contexts.append(get_charm_config_context())\n except KeyError:\n pass\n\n for c in contexts:\n context.update(c)\n return context",
"def context_processor(self):\n return {\n 'compute': self.compute,\n 'convert': self.convert\n }",
"def setup_path_templating( self\n , pathContexts\n , pathDefinitions ):\n self.pStk = lamia.core.configuration.compose_stack(pathContexts, pathDefinitions)",
"def create_context(self):\n flask.g.context = self._context_class()",
"def inject_into_context():\n return dict(\n dev_server = running_local # Variable dev_server is True if running on the GAE development server\n )",
"def configure_template_filters(app):\r\n app.jinja_env.filters['format_date'] = format_date\r\n app.jinja_env.filters['time_since'] = time_since\r\n app.jinja_env.filters['older_than_one_month'] = older_than_one_month\r\n app.jinja_env.filters['time_left_to'] = time_left_to\r\n app.jinja_env.filters['is_online'] = is_online\r\n app.jinja_env.filters['crop_title'] = crop_title\r\n app.jinja_env.filters['quote'] = quote"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update settings module with upper case values from another module.
|
def update_settings_from_module(settings, module_name, search_base=None, quiet=False):
settings = SettingsDict.ensure(settings)
if search_base is None:
search_base = settings.name.rpartition('.')[0]
module, tried = find_and_import_module(module_name, search=search_base)
if module:
data = {setting: getattr(module, setting) for setting in dir(module) if setting.isupper()}
settings.update(data)
        unload_module(module)  # the module can be removed from memory as all values have been loaded
del module
return len(data)
elif not quiet:
warning("Couldn't find {}. Tried: {}".format(module_name, tried))
return 0
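A minimal usage sketch, assuming this helper is called from the bottom of a Django settings.py and that SettingsDict.ensure accepts the module globals(); 'local_settings' is a hypothetical peer module holding UPPERCASE overrides:

# Hypothetical call site in settings.py (assumptions noted above)
count = update_settings_from_module(globals(), 'local_settings', quiet=True)
if count:
    print("Applied {} overrides from local_settings".format(count))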
|
[
"def load(self, module):\n log = logging.getLogger()\n log.debug(\"Loading settings from '{0}'\".format(module.__file__))\n \n for key, value in module.__dict__.iteritems():\n if not key.startswith('_') and key.isupper():\n setattr(self, key, value)",
"def update(self, settings):\n names = getFieldNames(IZEOConnection)\n for key, value in settings.items():\n if key in names:\n setattr(self, key, value)",
"def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()",
"def update_override_settings(self, override_settings: dict) -> None:",
"def setUpper(*args, **kwargs):\n \n pass",
"def _extend_collection(module):\n for setting in dir(module):\n if setting == setting.upper():\n setattr(collection, setting, getattr(module, setting))",
"def update_settings(user_app_name):\n\n base_apps = None\n settings = open(SETTINGS, \"r\").readlines()\n settings_app = [x for x in settings if \"'APPS':\" in x][0]\n settings_at = settings.index(settings_app)\n\n apps = re.findall('.*?\\[(.*?)\\].*?', settings_app.strip())\n if apps and len(apps) == 1:\n apps = apps[0]\n if apps:\n base_apps = apps\n apps = [x.strip() for x in apps.split(',')]\n apps.extend([\"'%s'\" % user_app_name])\n apps = ', '.join(apps)\n\n settings_app = re.sub(base_apps, apps, settings_app)\n settings[settings_at] = settings_app\n settings = reduce(lambda a, b: a + b, settings)\n with open(SETTINGS, \"w\") as sfile:\n sfile.write(settings)",
"def change_case(self):\n self.plain_text = self.plain_text.lower()",
"def testUpdateSettings(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user, is_admin=True)\n\n postdata = {\n 'description': TEST_DESCRIPTION,\n 'active_program': self.site.active_program.key()\n }\n response = self.post('/site/edit', postdata=postdata)\n self.assertResponseRedirect(response, url='/site/edit')\n\n site = site_model.Site.get_by_key_name('site')\n self.assertEqual(site.description, TEST_DESCRIPTION)",
"def change_en_US(self):\n self.language = 'en_US'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()",
"def test_injection_requires_capital_names(self):\n set_app_default_setting('test_setting', 'foo')\n with self.assertRaises(AttributeError):\n self.assertEqual(settings.test_setting, 'foo')",
"def update(self):\n logging.info(\"Updating settings...\")\n settings = storage.get_settings(self.db)\n if settings:\n for row in settings:\n self.settings[row.name] = row.value\n else:\n logging.warn(\"Could not find any settings in database - everything setup ok?\")",
"def convert_to_uppercase(item_in_dict):\n \n try:\n for key in item_in_dict.keys():\n item_in_dict[key.upper()] = convert_to_uppercase(item_in_dict.pop(key))\n except AttributeError:\n try:\n return item_in_dict.upper()\n except AttributeError:\n return item_in_dict\n return item_in_dict",
"def recase(self, variable):\n pass",
"def test_update_cloud_settings(self):\n pass",
"def toUpper(self):\n self.value = self.value.upper()\n return self",
"def change_ru_RU(self):\n self.language = 'ru_RU'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()",
"def update(self, settings):\n update_config = settings.configuration\n self.configuration.update(update_config)",
"def test_update_fts_settings(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will update only a single value from a python module. By default this value is SECRET_KEY, but that can be changed with the `setting` argument. If the module doesn't exist, a new file is created unless `create_if_missing` is False. The module is searched for starting at the peer of the settings module; an alternative search path can be given with `search_base`. The `secret_key_file` argument can be a python module name or a file path; a file path can be used to import a module from outside the project.
|
def update_secret_from_file(settings, secret_key_file=None, search_base=None, create_if_missing=True, setting=None):
settings = SettingsDict.ensure(settings)
secret_key_file = secret_key_file or DEFAULT_SECRET_KEY_FILE
setting = setting or 'SECRET_KEY'
if settings.get(setting):
# We already have non null secret_key
return
if search_base is None:
search_base = settings.name.rpartition('.')[0]
direct_file = '/' in secret_key_file or secret_key_file.endswith('.py')
if direct_file:
name, _ = splitext(basename(secret_key_file))
module = load_module_from_file(name, secret_key_file)
else:
module, _ = find_and_import_module(secret_key_file, search=search_base)
if module:
if hasattr(module, setting):
settings[setting] = getattr(module, setting)
else:
warning("Setting {} was not found from {}.".format(setting, module.__file__))
unload_module(module) # module can be removed from the memory as the value have been loaded
del module
elif create_if_missing:
if not direct_file:
secret_key_file = file_path_from_module_name(search_base, secret_key_file)
try:
key = create_secret_key_file(secret_key_file, setting=setting)
except IOError as e:
warning("Setting {} is not defined and we were unable to create {}: {}".format(setting, secret_key_file, e))
else:
print("Note: Stored setting {} in {}".format(setting, secret_key_file))
settings[setting] = key
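A similarly hedged sketch, again assuming globals() can be wrapped by SettingsDict.ensure; 'secret_key' is a hypothetical module name that will be created next to the settings module if it does not exist:

# Hypothetical call site in settings.py: load or generate SECRET_KEY
update_secret_from_file(globals(), secret_key_file='secret_key', create_if_missing=True)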
|
[
"def find_or_create_secret_key():\n SECRET_KEY_DIR = os.path.dirname(__file__)\n SECRET_KEY_FILEPATH = os.path.join(SECRET_KEY_DIR, 'secret_key.py')\n sys.path.insert(1, SECRET_KEY_DIR)\n\n if os.path.isfile(SECRET_KEY_FILEPATH):\n from .secret_key import SECRET_KEY\n return SECRET_KEY\n from django.utils.crypto import get_random_string\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n new_key = get_random_string(50, chars)\n with open(SECRET_KEY_FILEPATH, 'w') as file:\n file.write(\"# Django secret key\\n# Do NOT check this into version control.\"\n \"\\n\\nSECRET_KEY = '%s'\\n\" % new_key)\n from .secret_key import SECRET_KEY\n return SECRET_KEY",
"def update_settings_from_module(settings, module_name, search_base=None, quiet=False):\n settings = SettingsDict.ensure(settings)\n if search_base is None:\n search_base = settings.name.rpartition('.')[0]\n module, tried = find_and_import_module(module_name, search=search_base)\n\n if module:\n data = {setting: getattr(module, setting) for setting in dir(module) if setting.isupper()}\n settings.update(data)\n unload_module(module) # module can be removed from the memory as all values have been loaded\n del module\n return len(data)\n elif not quiet:\n warning(\"Couldn't find {}. Tried: {}\".format(module_name, tried))\n return 0",
"def ensure_secret_key_file():\n secret_path = os.path.join(ABS_PATH('settings'), 'secret.py')\n if not os.path.exists(secret_path):\n from django.utils.crypto import get_random_string\n secret_key = get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')\n with open(secret_path, 'w') as f:\n f.write(\"SECRET_KEY = \" + repr(secret_key) + \"\\n\")",
"def update_secret_file(config):\n iswitch_secret_file = get_iswitch_secretfile_path(config)\n irodsa_file = get_irodsa_path()\n\n if path.exists(irodsa_file):\n os.remove(irodsa_file)\n\n if path.exists(iswitch_secret_file):\n copy2(iswitch_secret_file, irodsa_file)\n os.chmod(iswitch_secret_file, 0o600)",
"def editConf(self, settingName, value):\n dirname, filename = os.path.split(os.path.abspath(__file__))\n path = dirname\n file_path = path.replace(\"\\\\\", \"/\") + \"/config.txt\"\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open(file_path) as old_file:\n for line in old_file:\n if len(line) == 0 or line[0:2] == \"//\":\n new_file.write(line)\n elif settingName in line:\n new_file.write(line.replace(line, settingName + value))\n else:\n new_file.write(line)\n # Copy the file permissions from the old file to the new file\n copymode(file_path, abs_path)\n # Remove original file\n remove(file_path)\n # Move new file\n move(abs_path, file_path)",
"def browseSettingFile(self):\n #Open file:\n self.settingsFilename = askopenfilename(filetypes=[('settings file','*.pkl')])\n self.settings.settings = templateLoader(self.settingsFilename)\n \n #Update settings data:\n for key in self.entries:\n var = self.entries[key]\n #Supposed to update fields:\n var.set(self.settings.settings[key].value)\n for key in self.buttons:\n var = self.buttons[key]\n #Supposed to update button states:\n var.set(self.settings.settings[key].value)",
"def update(section, key, value):\n config = ConfigParser.RawConfigParser()\n config.read(Config.RELATIVE_CONFIG_FILE_PATH)\n config.set(section, key, value)\n with open(Config.RELATIVE_CONFIG_FILE_PATH, 'wb') as configfile:\n config.write(configfile)",
"def sync_active_secret_file():\n irodsa_file = get_irodsa_path()\n current_iswitch_secret_file = get_iswitch_secretfile_path(\n get_current_config())\n\n if path.isfile(irodsa_file):\n copy2(irodsa_file, current_iswitch_secret_file)\n\n if (not path.exists(irodsa_file) and\n path.exists(current_iswitch_secret_file)):\n os.remove(current_iswitch_secret_file)",
"async def secret_config(self, ctx: commands.Context, key, value):\n pass",
"def update_file():\n with open(CONFIG_PATH, \"w\") as configfile:\n config.write(configfile)",
"def setval(filepath, key, value):\n if key.upper().startswith(\"META_\"):\n key = key.replace(\"META_\", \"META.\")\n file_obj = file_factory(filepath)\n file_obj.setval(key, value)",
"def load(self):\n try:\n module_string = self.module_file_path()\n except InvalidSettingFile as e:\n raise InvalidSettingFile(self.settings_file(), exc=e)\n else:\n try:\n module = importlib.import_module(module_string)\n except (ImportError, TypeError) as e:\n raise InvalidSettingFile(self.settings_file(), exc=e)\n else:\n for param in (s for s in dir(module) if not s.startswith('_')):\n param_value = getattr(module, param)\n\n # Ignore Import Statements of Modules and Import Statements of Functions\n if not inspect.ismodule(param_value) and not inspect.isfunction(param_value):\n self.__setitem__(param, param_value)",
"def __load_from_settings(self, key):\n\n with self.lock:\n try:\n with open(self.SETTINGS_FILE, 'r') as file:\n data = json.load(file)\n\n except FileNotFoundError:\n return self.DEFAULT_SETTINGS[key]\n\n return data[key]",
"def load_settings():\n gpio.setwarnings(False)\n settings_file = open('/home/pi/gpio_settings.cfg')\n settings = json.load(settings_file)\n \n return settings",
"def set_cookie_secret(project_directory):\n project_settings_file = os.path.join(project_directory, 'settings.py')\n with open(project_settings_file) as f:\n file_ = f.read()\n file_ = file_.replace('!!CHANGEME!!', generate_random_string())\n with open(project_settings_file, 'w') as f:\n f.write(file_)",
"def update_secret(ctx, secret_id, secret_value, prod=False):\n if not check_role_assumed(ctx):\n return\n result = run_secrets_command(ctx, UPDATE_SECRET_COMMAND, secret_id, secret_value, prod, True)\n if not result:\n run_secrets_command(ctx, ADD_SECRET_COMMAND, secret_id, secret_value, prod)",
"def get_secret(setting, secrets=secrets):\n try:\n return secrets[setting]\n except KeyError:\n error_msg = \"Set the {0} environment variable\".format(setting)\n raise ImproperlyConfigured(error_msg)",
"def update_a_python_environment_variable(main, file):\n if main.replace_in_file(file, \"%PYTHON%\", \"%PYTHON_HOME%\") and \\\n main.replace_in_file(file, \"PYTHON:\", \"PYTHON_HOME:\"):\n main.output_result_update(title=\"AppVeyor: Update PYTHON environment variable\")\n return True\n return False",
"def config(key):\n with open(\"aws_config.json\") as conf:\n return json.load(conf)[key]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update INSTALLED_APPS setting by expanding requirements from AppConfigs
|
def update_installed_apps(settings, apps_option=None):
settings = SettingsDict.ensure(settings)
installed_apps = settings.get('INSTALLED_APPS')
if installed_apps:
installed_apps = expand_required_apps(installed_apps, option=apps_option)
settings['INSTALLED_APPS'] = installed_apps
|
[
"def set_installed_apps(self, apps):\n \n # Make sure it's a list.\n apps = list(apps)\n \n # This function will be monkeypatched into place.\n def new_get_apps():\n return apps\n \n # Monkeypatch in!\n models.get_apps_old, models.get_apps = models.get_apps, new_get_apps\n settings.INSTALLED_APPS, settings.OLD_INSTALLED_APPS = (\n apps,\n settings.INSTALLED_APPS,\n )\n self._redo_app_cache()",
"def update_settings(user_app_name):\n\n base_apps = None\n settings = open(SETTINGS, \"r\").readlines()\n settings_app = [x for x in settings if \"'APPS':\" in x][0]\n settings_at = settings.index(settings_app)\n\n apps = re.findall('.*?\\[(.*?)\\].*?', settings_app.strip())\n if apps and len(apps) == 1:\n apps = apps[0]\n if apps:\n base_apps = apps\n apps = [x.strip() for x in apps.split(',')]\n apps.extend([\"'%s'\" % user_app_name])\n apps = ', '.join(apps)\n\n settings_app = re.sub(base_apps, apps, settings_app)\n settings[settings_at] = settings_app\n settings = reduce(lambda a, b: a + b, settings)\n with open(SETTINGS, \"w\") as sfile:\n sfile.write(settings)",
"def reset_installed_apps(self):\n models.get_apps = models.get_apps_old\n settings.INSTALLED_APPS = settings.OLD_INSTALLED_APPS\n self._redo_app_cache()",
"def update_app():\n pull_project()\n restart_app()",
"def updateApp():\n newConfig = json.loads(request.data)\n logger.info('Method called with: {0}'.format(newConfig))\n\n system = ServiceOrchestrator(CONFIG_FILE)\n ok = system.updateApps(newConfig)\n\n return 'ok'",
"def tools_update(ignore_apps=False, ignore_packages=False):\n from yunohost.app import app_fetchlist, app_info\n\n packages = []\n if not ignore_packages:\n cache = apt.Cache()\n\n # Update APT cache\n msignals.display(m18n.n('updating_apt_cache'))\n if not cache.update():\n raise MoulinetteError(errno.EPERM, m18n.n('update_cache_failed'))\n msignals.display(m18n.n('done'))\n\n cache.open(None)\n cache.upgrade(True)\n\n # Add changelogs to the result\n for pkg in cache.get_changes():\n packages.append({\n 'name': pkg.name,\n 'fullname': pkg.fullname,\n 'changelog': pkg.get_changelog()\n })\n\n apps = []\n if not ignore_apps:\n try:\n app_fetchlist()\n except MoulinetteError:\n pass\n app_list = os.listdir(apps_setting_path)\n if len(app_list) > 0:\n for app_id in app_list:\n if '__' in app_id:\n original_app_id = app_id[:app_id.index('__')]\n else:\n original_app_id = app_id\n\n current_app_dict = app_info(app_id, raw=True)\n new_app_dict = app_info(original_app_id, raw=True)\n\n # Custom app\n if 'lastUpdate' not in new_app_dict or 'git' not in new_app_dict:\n continue\n\n if (new_app_dict['lastUpdate'] > current_app_dict['lastUpdate']) \\\n or ('update_time' not in current_app_dict['settings'] \\\n and (new_app_dict['lastUpdate'] > current_app_dict['settings']['install_time'])) \\\n or ('update_time' in current_app_dict['settings'] \\\n and (new_app_dict['lastUpdate'] > current_app_dict['settings']['update_time'])):\n apps.append({\n 'id': app_id,\n 'label': current_app_dict['settings']['label']\n })\n\n if len(apps) == 0 and len(packages) == 0:\n msignals.display(m18n.n('packages_no_upgrade'))\n\n return { 'packages': packages, 'apps': apps }",
"def push_apply_app_config(self) -> None:\n # To be safe, let's run this by itself in the event loop.\n # This avoids potential trouble if this gets called mid-draw or\n # something like that.\n self._pending_apply_app_config = True\n _babase.pushcall(self._apply_app_config, raw=True)",
"def update_config():\n g.config = app.config",
"def _getAppConfig(self, aAppName):\n app_module = importlib.import_module(\".\" + aAppName, \"applications\")\n app_cfg = ApplicationConfig(aAppName, app_module)\n return app_cfg",
"def install_apps(self):\n apps_path = Path(self._path) / 'apps'\n if self._clean:\n # remove apps folder\n if apps_path.is_dir():\n shutil.rmtree(str(apps_path))\n\n # create apps folder if not already there\n apps_path.mkdir(exist_ok=True)\n\n for app in self._workflow['apps']:\n if self._app_name == app or not self._app_name:\n\n Log.some().info(\n 'app: %s', app\n )\n\n # determine path to install app\n repo_path = apps_path / slugify(app, regex_pattern=r'[^-a-z0-9_]+')\n\n # create AppInstaller instance\n app_installer = AppInstaller(\n str(repo_path),\n {\n 'name': app,\n 'gfVersion': self._workflow['gfVersion'],\n 'class': 'app',\n **self._workflow['apps'][app]\n }\n )\n\n # check if git and/or version fields are there\n if (self._workflow['apps'][app]['git']):\n Log.some().info(\n 'app from git repo: %s:%s [%s]',\n app,\n self._workflow['apps'][app]['git'],\n self._workflow['apps'][app]['version']\n )\n\n # clone app into install location\n if not app_installer.clone_git_repo():\n Log.an().error('cannot clone app to %s', str(repo_path))\n return False\n\n else: \n Log.some().info(\n 'app from inline definition: %s',\n app\n )\n\n # write app.yaml based on inline definition\n if not app_installer.write_app_yaml():\n Log.an().error('cannot write app yaml')\n return False\n\n if not app_installer.load_app():\n Log.an().error('cannot load app config')\n return False\n\n if self._make_apps:\n if not app_installer.make():\n Log.an().error('cannot compile app templates')\n return False\n\n # update app definition with implementation section\n if not app_installer.update_def():\n Log.an().error(\n 'cannot update app \"%s\" definition',\n app\n )\n return False\n\n return True",
"def generate_depends(global_config, package_config):\n\n # now export the root as whatever is in export_base_path_as\n if global_config.get(\"export_base_path_as\"):\n \n relative_import(\n global_config[\"base_path\"],\n global_config[\"export_base_path_as\"]\n )\n \n # now load the config.settings file \n config_path = package_config[\"path\"]\n\n if config_path.endswith(\"/\"):\n config_path = config_path[:-1]\n\n # make the config module name\n config_module = \"configs.%s.settings\" % config_path.split(\"/\")[-1]\n\n if global_config.get(\"prefix\"):\n config_module = \"%s.%s\" % (global_config[\"prefix\"], config_module)\n\n package_config[\"depends\"] = merge_and_de_dupe(\n package_config[\"depends\"]\n )\n\n try:\n config_settings = dynamic_import(config_module)\n for app in config_settings.INSTALLED_APPS:\n if \"django.contrib.admin\" in app:\n \n migration_package = Debian.make_package_name(\n global_config[\"base_path\"],\n os.path.join(global_config[\"base_path\"],\"migrations\"), \n global_config.get(\"prefix\"),\n global_config.get(\"package_name_filters\")\n )\n \n # work out migration package\n migration_config = {\n \"path\":os.path.join(global_config[\"base_path\"],\"migrations\")\n }\n \n migration_version = Debian(global_config).get_package_version(\n migration_config\n )\n \n package_config[\"depends\"] = merge_and_de_dupe(\n package_config[\"depends\"],\n \"%s (>=%s)\" % (\n migration_package,\n migration_version\n )\n )\n\n elif not app.startswith(\"django\"):\n app_package = Debian.normalise_package_name(\n app,\n global_config.get(\"package_name_filters\")\n )\n \n package_config[\"depends\"] = merge_and_de_dupe(\n package_config[\"depends\"],app_package\n )\n except:\n display_warning('Suitcase Warning: No settings file found for config package %s' % package_config[\"package\"])\n\n if package_config.get(\"target_hosts\"):\n \n for host in package_config[\"target_hosts\"]:\n asset_path = os.path.join(\n global_config[\"base_path\"],\n \"assets\",\n \"domains\",\n host\n )\n \n if os.path.exists(asset_path):\n \n package_config[\"depends\"] = merge_and_de_dupe(\n package_config[\"depends\"],\n Debian.make_package_name(\n global_config[\"base_path\"],\n asset_path, \n global_config.get(\"prefix\"),\n global_config.get(\"package_name_filters\")\n )\n )\n\n template_path = os.path.join(\n global_config[\"base_path\"], \n \"templates\", \n \"root\",\n \"domains\",\n host\n )\n \n if os.path.exists(template_path):\n package_config[\"depends\"] = merge_and_de_dupe(\n package_config[\"depends\"],\n Debian.make_package_name(\n global_config[\"base_path\"],\n template_path, \n global_config.get(\"prefix\"),\n global_config.get(\"package_name_filters\")\n )\n )\n\n return package_config",
"def get_core_apps(overrides=None):\n if not overrides:\n return WSHOP_CORE_APPS\n\n # Conservative import to ensure that this file can be loaded\n # without the presence Django.\n from django.utils import six\n if isinstance(overrides, six.string_types):\n raise ValueError(\n \"get_core_apps expects a list or tuple of apps \"\n \"to override\")\n\n def get_app_label(app_label, overrides):\n pattern = app_label.replace('wshop.apps.', '')\n for override in overrides:\n if override.endswith(pattern):\n if 'dashboard' in override and 'dashboard' not in pattern:\n continue\n return override\n return app_label\n\n apps = []\n for app_label in WSHOP_CORE_APPS:\n apps.append(get_app_label(app_label, overrides))\n return apps",
"def update_package_config():\n try:\n import importlib\n import sys\n import json\n\n path = importlib.machinery.PathFinder().find_spec('sentinelhub', sys.path[1:]).submodule_search_locations[0]\n old_config_filename = os.path.join(path, 'config.json')\n\n with open(old_config_filename, 'r') as file:\n old_config = json.load(file)\n\n from sentinelhub.config import SHConfig\n\n config = SHConfig()\n for attr, value in old_config.items():\n if hasattr(config, attr) and not getattr(config, attr):\n setattr(config, attr, value)\n\n config.save()\n\n except BaseException:\n pass",
"def migrate(self, *apps):\n with cd(self.cfg['django']['DJANGO_ROOT']):\n if not apps:\n local('python manage.py migrate')\n else:\n for app in apps:\n local('python manage.py migrate %s' % app)",
"def _trim_to_apps(self, changes, app_labels):\n # Gather other app dependencies in a first pass\n app_dependencies = {}\n for app_label, migrations in changes.items():\n for migration in migrations:\n for dep_app_label, name in migration.dependencies:\n app_dependencies.setdefault(app_label, set()).add(dep_app_label)\n required_apps = set(app_labels)\n # Keep resolving till there's no change\n old_required_apps = None\n while old_required_apps != required_apps:\n old_required_apps = set(required_apps)\n required_apps.update(\n *[app_dependencies.get(app_label, ()) for app_label in required_apps]\n )\n # Remove all migrations that aren't needed\n for app_label in list(changes):\n if app_label not in required_apps:\n del changes[app_label]\n return changes",
"def install(app_name):\n\n module_paths = {\n u\"settings\": u\"{0}.settings\",\n u\"regex\": u\"{0}.regex\",\n u\"semantics\": u\"{0}.semantics\",\n }\n modules = {}\n\n for module_name, module_path in module_paths.iteritems():\n try:\n modules[module_name] = __import__(module_path.format(app_name),\n fromlist=[None])\n except ImportError, error:\n message = u\"Error importing {0!r}: {1}\"\n raise QuepyImportError(message.format(module_name, error))\n\n return QuepyApp(**modules)",
"def deploy():\n local('appcfg.py update src', capture=False)",
"def test_app_labels_1_7(self):\n if django.VERSION >= (1, 7):\n from django.apps import AppConfig\n else:\n # set up poor man's mock for 1.7/1.8 behaviour.\n from collections import namedtuple\n\n class AppConfig(object):\n call_count = 0\n ret_map = {\n 'example1': 'example1',\n 'example2.apps.Example2AppConfig': 'example2_app',\n }\n\n @classmethod\n def create(cls, app):\n return namedtuple('AppConfig', ['label'])(\n cls.ret_map[app]\n )\n dj_apps = self.set_up_module('django.apps')\n dj_apps.AppConfig = AppConfig\n\n reload(utils)\n\n self.set_up_module('example1')\n apps = self.set_up_module('example2.apps')\n\n # set up AppConfig on the `test_app.apps` module\n class Example2AppConfig(AppConfig):\n name = 'example2'\n label = 'example2_app' # with different name\n path = '/tmp' # for whatever reason path is required\n\n apps.Example2AppConfig = Example2AppConfig\n\n self.assertEqual(\n utils.app_labels([\n 'example1',\n 'example2.apps.Example2AppConfig'\n ]),\n ['example1', 'example2_app'],\n )",
"def sirtrevor_installed(app_configs, **kwargs):\n errors = []\n try:\n apps.get_app_config('sirtrevor')\n except LookupError:\n error = Error(\n 'Django SirTrevor must be in INSTALLED_APPS.',\n hint=\"Add 'sirtrevor' to INSTALLED_APPS.\",\n id='conman.pages.E001',\n )\n errors.append(error)\n\n return errors",
"def autodiscover():\n import copy\n from django.utils.importlib import import_module\n from django.conf import settings\n from django.utils.module_loading import module_has_submodule\n\n for app in settings.INSTALLED_APPS:\n mod = import_module(app)\n # Attempt to import the app's mommy module.\n try:\n import_module('%s.mommy' % app)\n except:\n # silently fail if mommy module does not exist\n if module_has_submodule(mod, 'mommy'):\n raise"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update TEMPLATES setting by adding context_processors from AppConfigs
|
def update_context_processors_from_apps(settings, processors_option=None):
settings = SettingsDict.ensure(settings)
installed_apps = settings.get('INSTALLED_APPS')
templates = settings.get('TEMPLATES')
if installed_apps and templates:
add_required_context_processors(templates, installed_apps, option=processors_option)
|
[
"def register_context_processors(app: Flask) -> None:\n app.context_processor(inject_get_alerts)\n app.context_processor(inject_get_hidden_alerts)\n app.context_processor(inject_a11y_url)",
"def _create_template_config(self, config):\n pass",
"def add_context_data(app, pagename, templatename, context, doctree):\n context['site'] = app.site_data\n # The translation context is pinned to the Italian sources, as Sphinx has\n # it's own translation mechanism built in\n if 'language' in context and context['language'] != None:\n language = context['language']\n else:\n language = app.site_data['default_language']\n context['t'] = app.site_data['data']['l10n'][language]['t']\n\n # Run only for local development\n if os.environ.get('READTHEDOCS', None) != 'True':\n context['LOCAL'] = True\n context['PRODUCTION_DOMAIN'] = 'localhost'\n context['slug'] = 'demo-document'\n context['current_version'] = 'bozza'\n context['rtd_language'] = 'it'\n context['publisher_project'] = u'Progetto demo'\n context['publisher_project_slug'] = 'progetto-demo'\n context['publisher'] = u'Organizzazione demo'\n context['publisher_slug'] = 'organizzazione-demo'\n context['tags'] = [\n ('demo', '#'),\n ('docs italia', '#')\n ]\n\n if 'docsitalia_data' in context:\n context['docstitle'] = context['docsitalia_data']['document']['name']\n else:\n try:\n with open(os.path.join(app.builder.srcdir,'document_settings.yml')) as document_settings:\n data = document_settings.read()\n data = yaml.safe_load(data)\n except:\n data = {\n 'document': {\n 'name': 'Titolo del documento non impostato'\n }\n }\n\n context['docsitalia_data'] = data",
"def set_context(self, context):",
"def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)",
"def __context(self):\n # TODO: do we need to include anything else?\n # TODO: do this with the django-settings-context-processor\n return { \"THEME_NAME\" : getattr(settings, \"THEME_NAME\", None) }",
"def webpack_config(context: Context):\n context.write_template('webpack.config.js')",
"def set_up_context(self, templates, **cnf):\n self.context_menu = self.make_menu(templates, **cnf)\n MenuUtils.bind_all_context(\n self,\n lambda event: ContextMenuMixin.popup(event, self.context_menu),\n add='+'\n )",
"def settings(request):\n\n old_vars = ('STATIC_DIR', 'PRIMARY_COLOR', 'SECONDARY_COLOR', 'CUSTOM_TRACKER_HTML')\n return dict((var, getattr(_settings, var, '')) for var in getattr(_settings, 'EXPOSE_TO_CONTEXT', old_vars))",
"def setup_render(self):\n self.loader = jinja2.FileSystemLoader(config.templatedir,\n encoding='utf-8')\n self.extensions = ['jinja2.ext.with_']\n myenv = jinja2.Environment(loader = self.loader,\n extensions = self.extensions,\n trim_blocks = True,\n autoescape = True,\n line_statement_prefix = \"#\")\n\n myenv.globals['thumbtype'] = thumbtype\n myenv.globals['thumbiconurl'] = self.thumbiconurl\n myenv.globals['thumbsize'] = thumbsize\n myenv.globals['imgsize'] = imgsize\n myenv.globals['url'] = self.url\n myenv.globals['lockedto'] = lambda : None\n\n myenv.filters = apps.templates.render.myenv.filters\n self.myenv = myenv",
"def setup_path_templating( self\n , pathContexts\n , pathDefinitions ):\n self.pStk = lamia.core.configuration.compose_stack(pathContexts, pathDefinitions)",
"def set_theme_in_context(request, response):\n \n #create the context data and set the theme variable\n request_context = response.resolve_context(response.context_data)\n request_context[_local_thread.keys['context']] = get_theme_from_cookie(request)\n \n response.context_data = request_context\n \n if settings.DEBUG:\n date = datetime.today()\n print('[' + date.strftime('%d/%b/%Y %X') + '] [CHAMELEON] Added theme to context data')",
"def post_create_template_hooker(self, **kwargs):\n self.template.update_tags(\n self.COMMON_TAGS.get_value(), overwrite=False)",
"def configure_template_filters(app):\r\n app.jinja_env.filters['format_date'] = format_date\r\n app.jinja_env.filters['time_since'] = time_since\r\n app.jinja_env.filters['older_than_one_month'] = older_than_one_month\r\n app.jinja_env.filters['time_left_to'] = time_left_to\r\n app.jinja_env.filters['is_online'] = is_online\r\n app.jinja_env.filters['crop_title'] = crop_title\r\n app.jinja_env.filters['quote'] = quote",
"def add_dynamic_context(self, plugin):\n self._add_plugin(plugin, self.context_switchers)",
"def get_context_data(self, **kwargs):\n ctx = super(WagtailMvcView, self).get_context_data(**kwargs)\n ctx.update(self.page.get_context(\n self.request,\n *self.args,\n **kwargs\n ))\n return ctx",
"def get_context(self, context, with_labels, admin_site):\n site = get_admin_site(admin_site)\n\n if context_passes_test(context) and site is not None:\n modeladmins = get_registered_modeladmins(context['request'], site)\n context.update({\n 'should_display_toolbar': True,\n 'should_display_apps': with_labels,\n 'app_list': _resort_modeladmins(modeladmins),\n })\n return context",
"def set_jinja2_options(self, **kw):\n\t\tglobal jinja_env\n\t\tjinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), **kw)",
"def _configure_templates(self, formatter):\n if formatter:\n for sub_formatter in formatter.split(self.FORMATTER_DELIMITER):\n try:\n key, value = sub_formatter.split(self.TEMPLATE_ASSIGNER)\n except ValueError:\n raise ValueError(f'Invalid template formatter: {sub_formatter!r}')\n if key != self.NUMBER_TOKEN:\n key = ast.literal_eval(key)\n self.template_map[key] = self.TEMPLATE_CLASS(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrap template loaders with the cached loader in production (DEBUG = False)
|
def use_cache_template_loader_in_production(settings, cached_backends=None):
# FIXME: this is done by Django from version 1.11 onwards, thus drop this at some point
settings = SettingsDict.ensure(settings)
debug = settings.get('DEBUG', False)
templates = settings.get('TEMPLATES')
cached_backends = cached_backends or DEFAULT_CACHED_BACKENDS
if not templates or debug:
return
for conf in templates:
if conf['BACKEND'] in cached_backends:
options = conf.setdefault('OPTIONS', {})
loaders = options.get('loaders')
if not loaders or DEFAULT_CACHED_LOADER not in flatten_loaders(loaders):
if not loaders:
loaders = (DEFAULT_LOADER,)
if conf.get('APP_DIRS', False):
loaders += (DEFAULT_APP_LOADER,)
loaders = ((DEFAULT_CACHED_LOADER, loaders),)
options['loaders'] = loaders
conf.pop('APP_DIRS')
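For illustration only: assuming DEFAULT_LOADER, DEFAULT_APP_LOADER and DEFAULT_CACHED_LOADER hold the stock Django loader paths, a Django-backend TEMPLATES entry with APP_DIRS=True is rewritten roughly into this shape when DEBUG is False (APP_DIRS is removed because explicit loaders and APP_DIRS are mutually exclusive):

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [],
    'OPTIONS': {
        'loaders': [
            ('django.template.loaders.cached.Loader', [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]),
        ],
    },
}]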
|
[
"def _load_compilers(self, caller):\n\n debug = dj_settings.DEBUG\n template = ''\n\n if hasattr(dj_settings, 'STATICLINK_CLIENT_COMPILERS'):\n for ext in dj_settings.STATICLINK_CLIENT_COMPILERS:\n if self._is_debug(ext):\n debug = True\n compiler = dj_settings.STATICLINK_CLIENT_COMPILERS[ext]\n template = '%s\\n<script src=\"%s\"></script>' % (template, compiler)\n\n if debug:\n template = \"%s\\n<script>localStorage.clear();</script>\" % template\n\n return self.environment.from_string(template).render()",
"def _get_loaders():\n from coffin.template.loaders import jinja_loader_from_django_loader\n if _LOADERS:\n return _LOADERS\n from django.conf import settings\n for loader in settings.TEMPLATE_LOADERS:\n if isinstance(loader, basestring):\n loader_obj = jinja_loader_from_django_loader(loader)\n if loader_obj:\n _LOADERS.append(loader_obj)\n else:\n warnings.warn('Cannot translate loader: %s' % loader)\n else: # It's assumed to be a Jinja2 loader instance.\n _LOADERS.append(loader)\n return _LOADERS",
"def jinja_loader(self):\n return ModuleTemplateLoader(\n self.database_name, searchpath=self.template_folder,\n )",
"def patch_load():\n import piglet.runtime\n\n saved = piglet.runtime.load\n piglet.runtime.load = lambda template, *args, **kwargs: template\n yield\n piglet.runtime.load = saved",
"def _maybe_patch_jinja_loader(jinja_env):\r\n if not isinstance(jinja_env.loader, ChoiceLoader):\r\n jinja_env.loader = ChoiceLoader([jinja_env.loader, package_loader])\r\n elif package_loader not in jinja_env.loader.loaders:\r\n jinja_env.loader.loaders.append(package_loader)",
"def get_template_loaders():\n try:\n from django.template.engine import Engine\n except ImportError: # Django < 1.8\n Engine = None\n\n if Engine:\n try:\n engine = Engine.get_default()\n except ImproperlyConfigured:\n loaders = []\n else:\n loaders = engine.template_loaders\n else: # Django < 1.8\n from django.template.loader import find_template_loader\n loaders = [\n find_template_loader(loader_name)\n for loader_name in settings.TEMPLATE_LOADERS]\n return loaders",
"def get_template_loader(self):\n return self._template_loader",
"def test_can_use_imported_templatetags(self):\n template = (\"{% load cachet i18n %}{% cachet %}\"\n \"{% get_current_language as lang %}{{ lang }}\"\n \"{% endcachet %}\")\n translation.activate('en')\n rendered = self.render_template(template)\n self.assertEqual(rendered, 'en')",
"def test_no_template_source_loaders(self):\n with self.assertLogs(\"django.request\", \"ERROR\"):\n with self.assertRaises(TemplateDoesNotExist):\n self.client.get(\"/render_no_template/\")",
"def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)",
"def passthrough_engine():\n\treturn JinjaEngine()",
"def configurable_inclusion_tag(register, cache_key=None, cache_time=60):\n\n def dec(func):\n params, xx, xxx, defaults = getargspec(func)\n if params[0] == 'context':\n params = params[1:]\n else:\n raise TemplateSyntaxError(\"Must have a first argument of 'context'\")\n\n class InclusionNode(Node):\n def __init__(self, vars_to_resolve):\n self.vars_to_resolve = list(map(Variable, vars_to_resolve))\n self.nodelists = {}\n\n @staticmethod\n def calculate_cache_key(args):\n if cache_key:\n if isinstance(cache_key, collections.Callable):\n return cache_key(*args)\n else:\n return cache_key\n return None\n\n def render(self, context):\n resolved_vars = [var.resolve(context) for var in self.vars_to_resolve]\n args = [context] + resolved_vars\n my_cache_key = self.calculate_cache_key(args)\n if my_cache_key:\n output = cache.cache.get(my_cache_key)\n if output:\n return output\n returnval = func(*args)\n if not returnval:\n return \"\"\n (file_name, dict_) = returnval\n\n if file_name not in self.nodelists:\n if not isinstance(file_name, str) and is_iterable(file_name):\n t = select_template(file_name)\n else:\n t = get_template(file_name)\n self.nodelists[file_name] = t.nodelist\n new_context = Context(dict_)\n # Copy across the CSRF token, if present, because inclusion\n # tags are often used for forms, and we need instructions\n # for using CSRF protection to be as simple as possible.\n csrf_token = context.get('csrf_token', None)\n if csrf_token is not None:\n new_context['csrf_token'] = csrf_token\n output = self.nodelists[file_name].render(new_context)\n if my_cache_key:\n cache.cache.set(my_cache_key, output, cache_time)\n return output\n\n compile_func = curry(old_generic_tag_compiler, params, defaults,\n getattr(func, \"_decorated_function\", func).__name__, InclusionNode)\n compile_func.__doc__ = func.__doc__\n register.tag(getattr(func, \"_decorated_function\", func).__name__, compile_func)\n return func\n\n return dec",
"def setup_render(self):\n self.loader = jinja2.FileSystemLoader(config.templatedir,\n encoding='utf-8')\n self.extensions = ['jinja2.ext.with_']\n myenv = jinja2.Environment(loader = self.loader,\n extensions = self.extensions,\n trim_blocks = True,\n autoescape = True,\n line_statement_prefix = \"#\")\n\n myenv.globals['thumbtype'] = thumbtype\n myenv.globals['thumbiconurl'] = self.thumbiconurl\n myenv.globals['thumbsize'] = thumbsize\n myenv.globals['imgsize'] = imgsize\n myenv.globals['url'] = self.url\n myenv.globals['lockedto'] = lambda : None\n\n myenv.filters = apps.templates.render.myenv.filters\n self.myenv = myenv",
"def factory(request):\n cache_factory = SimpleCacheFactory()\n return cache_factory",
"def update_cache(fn):\n @wraps(fn)\n def wrapped(self, *args, **kwargs):\n if not self._template_cached:\n self._pyuppaal = self.create_template()\n self._template_cached = True\n return fn(self, *args, **kwargs)\n\n return wrapped",
"def templateLoader(loadname):\n with open(loadname, 'rb') as loadfile:\n settings = load(loadfile)\n \n return settings",
"def test_slimfile_single_debug_off(self):\r\n settings.DEBUG = False\r\n settings.DJANGO_STATIC = True\r\n\r\n self._test_slimfile_single('/testing.js',\r\n 'var a = function() { return ; }')",
"def offline_context():\n # These years correspond to the years that we have base templates for in `rca_show/templates`\n for year in ['2016', '2017', '2018', '2019']:\n yield {\n 'STATIC_URL': settings.STATIC_URL,\n 'base_template': get_base_show_template(year),\n }",
"def cache_error_pers():\n return render_template('error.html')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If this pixel is part of an edge, make it blue
|
def highlight_edges(edges, image):
image[edges>0.01] = [255, 0, 0]
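A minimal usage sketch, assuming OpenCV with BGR images ('road.jpg' is a placeholder path), so [255, 0, 0] renders as blue:

import cv2

img = cv2.imread('road.jpg')                      # BGR image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 200)                 # 0/255 edge map; any edge pixel is > 0.01
highlight_edges(edges, img)                       # paint edge pixels blue in place
cv2.imwrite('road_edges.jpg', img)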
|
[
"def color_edge(G, edge_id, color):\n G.edge[edge_id[0]][edge_id[1]]['color'] = color",
"def edge_color(e, g, pmap_component, pmap_color, default_color = \"black\"):\n u = g.source(e)\n v = g.target(e)\n color_u = pmap_color[pmap_component[u]]\n color_v = pmap_color[pmap_component[v]]\n return color_u if color_u == color_v else default_color",
"def test_edge_color_d2(self):\n vehicle_routing = VehicleRouting(self.graph, depot=2)\n self.assertEqual(\n vehicle_routing._edge_color(vehicle_routing.interpret(self.result_d2)),\n [0.0, 0.0, 0.0, 0.5, 0.5],\n )",
"def edge_detection(img):\n new_img = np.copy(img)\n\n # Sobel x\n r_channel = new_img[:, :, 0]\n gradx = abs_sobel_thresh(r_channel, orient='x', thresh=(12, 100))\n grady = abs_sobel_thresh(r_channel, orient='y', thresh=(25, 255))\n\n grad = np.zeros_like(gradx)\n grad[(gradx == 1) & (grady == 1)] = 1\n\n # Threshold color channel\n c_binary = color_thresh(img, s_thresh=(100, 255), v_thresh=(150, 255))\n\n # Combine the effects of all thresholds\n final = np.zeros_like(gradx)\n final[(gradx == 1) & (grady == 1) | c_binary == 1] = 1\n # return final.astype(np.uint8) * 255\n return final",
"def blue_channel(image):\n new_image = copy(image)\n \n for (x, y, (r, g, b)) in image:\n blue_colour = create_color(0,0,b)\n set_color(new_image, x, y, blue_colour)\n \n return new_image",
"def detect_edges(image, threshold):\n\ttarget = copy(image)\n\tblack = create_color(0,0,0)\n\twhite = create_color(255,255,255)\n\n\tfor y in range(1, get_height(image) - 1):\n\t\tfor x in range (1, get_width(image) - 1):\n\t\t\tcenter_red, center_green, center_blue = get_color(image, x, y)\n\t\t\tbelow_red, below_green, below_blue = get_color(image, x, y+1)\n\t\t\tright_red, right_green, right_blue = get_color(image, x+1, y)\n\n\n\t\t\tcenter_brightness = get_brightness(center_red, center_green,\n\t\t\t center_blue)\n\t\t\tbelow_brightness = get_brightness(below_red, below_green,\n\t\t\t below_blue)\n\t\t\tright_brightness = get_brightness(right_red, right_green,\n\t\t\t right_blue)\n\n\t\t\tif abs(center_brightness-below_brightness) > threshold or abs(\n\t\t\t center_brightness - right_brightness) > threshold:\n\t\t\t\tset_color(target, x, y, black)\n\t\t\telse:\n\t\t\t\tset_color(target, x, y, white)\n\tshow(target)",
"def edge_mask(self):",
"def pixel_color(self, x, y):\n\n x = int(x)-1\n y = int(y)-1\n\n try:\n return self.canvas[x][y]\n except:\n return None",
"def edge_track(image, weak, strong=255):\n\n # get the height and width of the image.\n (height, width) = image.shape[:2]\n \n # iterate through the edges, if the pixel value\n # equals the weak pixel ID: 45, then check all neighboring pixels\n # if one is strong set the pixel to strong, otherwise suppress it\n for row in xrange(height):\n for col in xrange(width):\n\n # check to see if weak pixel\n if image[row, col] == 45:\n\n # check if pixel to right is strong\n if (image[row+1,col] == strong):\n image[row][col] = strong\n\n # check if pixel to upper right is strong\n elif (image[row+1,col-1] == strong):\n image[row][col] = strong\n\n # check if pixel to lower right is strong\n elif (image[row+1,col+1] == strong):\n image[row][col] = strong\n \n # check if pixel to left is strong\n elif (image[row-1,col] == strong):\n image[row][col] = strong\n \n # check if pixel to bottom left is strong\n elif (image[row-1,col+1] == strong):\n image[row][col] = strong\n \n # check if pixel to upper left is strong\n elif (image[row-1,col-1] == strong):\n image[row][col] = strong\n \n # check if pixel below is strong\n elif (image[row,col+1] == strong):\n image[row][col] = strong\n \n # check if pixel above is strong\n elif (image[row,col-1] == strong):\n image[row][col] = strong\n \n # if no strong pixels around, suppress\n else:\n image[row][col] = 0\n\n # write output to file\n out = OUT_FOLDER+\"/custom_edge.jpg\"\n cv2.imwrite(out, image)\n\n # return edge matrix\n return image",
"def vertex_binary_color(binary: np.ndarray, x: int, y: int, r: float, r_factor: float, threshold: float) -> int:\n fill_ratio = circle_fill_ratio(binary, x, y, int(r * r_factor))\n if fill_ratio >= threshold:\n return 255\n else:\n return 0",
"async def hue_colour_rgb(self, ctx, red: float, green: float, blue: float, *, name=None):\n if not await self.get_bridge():\n await ctx.send(\"No IP has been set.\")\n return\n x, y = await self.rgb_to_xy(red, green, blue)\n for light in self.lights:\n if name is None or light.name.lower() == name.lower() and light.on:\n light.xy = [x, y]",
"def black(self):\n return self.red == 0 and self.green == 0 and self.blue == 0",
"def find_edge(img):\n y, x = img.shape\n return y if y < x else x",
"def detect_edges_better_test() -> None:\n \n original_image = create_image(7, 3, color = create_color(10, 10, 10)) # Creates image that is width 6 and height 2 set to a dark colour. \n bright = create_color(250, 250, 250)\n set_color(original_image, 1, 0, bright)\n set_color(original_image, 2, 1, bright)\n set_color(original_image, 2, 2, bright)\n set_color(original_image, 3, 0, bright)\n set_color(original_image, 3, 1, bright)\n set_color(original_image, 3, 2, bright)\n set_color(original_image, 4, 1, bright) \n \n expected_image = create_image(7, 3, color = create_color(255, 255, 255)) # Creates image that is width 6 and height 2 set to a bright colour.\n black = create_color(0, 0, 0)\n set_color(expected_image, 0, 0, black)\n set_color(expected_image, 1, 0, black)\n set_color(expected_image, 1, 1, black) \n set_color(expected_image, 1, 2, black) \n set_color(expected_image, 2, 0, black)\n set_color(expected_image, 3, 0, black)\n set_color(expected_image, 4, 0, black) \n set_color(expected_image, 4, 1, black) \n set_color(expected_image, 4, 2, black) \n \n actual_image = detect_edges_better(original_image, 100, False) # The Improved Edge Detection filter is applied to original_image. \n \n for x, y, (r, g, b) in actual_image:\n if (r, g, b) == tuple(get_color(expected_image, x, y)): # Checks if the pixels in the actual image equal the ones in the expected image.\n print('PASS')\n else:\n print('FAIL')",
"def _light_pixel(self):\r\n #print(\"x = {} : y = {}\".format(self.x,self.y))\r\n self.ap.set_pixel(self.x, self.y, \r\n self.colour[0], self.colour[1], self.colour[2])",
"def set_pixel(self, row, col, new_color):\n assert all([isinstance(row, int), isinstance(col, int)])\n assert all([row <= self.size()[0], col <= self.size()[1]])\n for i in range(3):\n if new_color[i] != -1:\n self.pixels[i][row][col] = new_color[i]",
"def maintain_blue(self):\n\n # Flag for checking if there exists a red node with larger LDTSD in\n # imediate neigbourhood\n if_larger_red_exists = False\n\n n_blue = 0.0\n n_nodes = 0.0\n\n for nid in self.neighbour_set:\n if not if_larger_red_exists:\n if SIMULATION_MAP[nid].color == 'red':\n if self.LDTSD < SIMULATION_MAP[nid].LDTSD:\n if_larger_red_exists = True\n self.color = 'green'\n self.membership_table_CH.append(nid)\n SIMULATION_MAP[nid].membership_table_member.append(self.nodeid)\n\n if SIMULATION_MAP[nid].color == 'blue':\n n_blue += 1.0\n\n n_nodes += 1.0\n\n if not if_larger_red_exists:\n r = 0\n # if number of nodes in neighbourhood is zero, white nodes ratio or\n # r will be undefined\n if n_nodes == 0:\n # Setting it to more than n2 so that node does not remain white\n # there are no nodes in neighbourhood. Node will try to initiate\n # nomination process when no nodes are found in neighbourhood\n r = CLUSTER_MAINTENANCE_N2 + 0.1\n else:\n r = n_blue/n_nodes\n if r >= CLUSTER_MAINTENANCE_N2:\n self.run_nomination_process()",
"def inverted_solid_fill_color(self, inverted_solid_fill_color):\n self._inverted_solid_fill_color = inverted_solid_fill_color",
"def blue():\n\n return color2float(Uint8Tensor([0, 162, 232]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If corner intensity is above a certain threshold, make it green
|
def highlight_significant_corners(corners, image):
# This line is equivalent to the nested loop below, but much faster.
image[corners > 0.01 * corners.max()] = [0, 255, 0]
# for rowIndex in range(len(corners)):
# for pixelIndex in range(len(corners[0])):
# if corners[rowIndex][pixelIndex] > (0.01 * corners.max()):
# image[rowIndex][pixelIndex] = [0, 255, 0]
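A matching sketch for the corner case, again assuming OpenCV ('board.jpg' is a placeholder path); cv2.cornerHarris expects a float32 grayscale image:

import cv2
import numpy as np

img = cv2.imread('board.jpg')
gray = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
corners = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)  # corner response map
highlight_significant_corners(corners, img)       # strong responses painted green in place
cv2.imwrite('board_corners.jpg', img)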
|
[
"def colorThreshold(img, rbg_threshold = (60,60,60)):\n temp = np.zeros(img.shape)\n rflags_h = img[:,:]>rbg_threshold[0]\n\n temp[:,:][rflags_h] = 1\n \n return temp",
"def vertex_binary_color(binary: np.ndarray, x: int, y: int, r: float, r_factor: float, threshold: float) -> int:\n fill_ratio = circle_fill_ratio(binary, x, y, int(r * r_factor))\n if fill_ratio >= threshold:\n return 255\n else:\n return 0",
"def highlight_edges(edges, image):\n image[edges>0.01] = [255, 0, 0]",
"def threshold( self, snapshot ):\n import pygame\n snapshotMinusBackground = snapshot.copy()\n threshold_value = 40 # How close to the existing colour must each point be?\n pygame.transform.threshold( snapshotMinusBackground,\n snapshot,\n ( 0,0,0 ),\n [threshold_value]*3 ,\n ( 255,255,255 ),\n 1,\n self.background )\n # Median filter would be good here to remove salt + pepper noise...\n return snapshotMinusBackground",
"def color_threshold(img, s_thresh=(90, 255)):\n # Some other factors to consider 170 255\n # Convert to HLS color space and separate the V channel\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:, :, 2]\n # l_channel = hls[:, :, 1] #TODO (ivan) consider this in future improvements\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n return s_binary",
"def clip_corners_on_intensity(corners, img, average_filter_size):\n value_per_degree = 255.0/360.0\n min_degree, max_degree = 60, 120 # +- 30 from 90 degrees\n\n # Since 255 is white and 0 is black, subtract from 255\n # to get black intensity instead of white intensity\n min_average_intensity = 255 - max_degree*value_per_degree\n max_average_intensity = 255 - min_degree*value_per_degree\n\n number_of_corners = len(corners)\n print number_of_corners\n\n min_intensity = np.array([min_average_intensity]*number_of_corners)\n max_intensity = np.array([max_average_intensity]*number_of_corners)\n\n img_average_intensity = make_circle_average_blurry(img, average_filter_size)\n\n corner_x = np.int0(corners[:,0])\n corner_y = np.int0(corners[:,1])\n\n corners_clipped_on_intensity = corners[\n np.logical_and(\n np.greater(\n img_average_intensity[corner_x,corner_y],\n min_intensity\n ), # Add top limit\n np.less(\n img_average_intensity[corner_x,corner_y],\n max_intensity\n ) # Add bottom limit\n )\n ]\n corner_x = np.int0(corners_clipped_on_intensity[:,0])\n corner_y = np.int0(corners_clipped_on_intensity[:,1])\n \n if np.ndim(corner_x) == 0:\n corners = np.array([[corner_x, corner_y]])\n intensities = np.array([img_average_intensity[corner_x, corner_y]])\n number_of_corners = 1\n else:\n corners = np.stack((corner_x, corner_y), axis=1)\n intensities = np.array(img_average_intensity[corner_x, corner_y])\n number_of_corners = len(corners)\n print number_of_corners\n\n print \"intensities: \", intensities\n\n if number_of_corners == 0:\n return None, None\n else:\n return corners, intensities",
"def color_thresh(input_img, rgb_thresh=(160, 160, 160),\n low_bound=(75, 130, 130), upp_bound=(255, 255, 255)):\n # Create arrays of zeros same xy size as input_img, but single channel\n nav_img = np.zeros_like(input_img[:, :, 0])\n obs_img = np.zeros_like(input_img[:, :, 0])\n\n # Convert BGR input_img to HSV for rock samples\n hsv_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV)\n\n # Require that each of the R(0), G(1), B(2) pixels be above all three\n # rgb_thresh values such that pixpts_above_thresh will now contain a\n # boolean array with \"True\" where threshold was met\n pixpts_above_thresh = ((input_img[:, :, 0] > rgb_thresh[0])\n & (input_img[:, :, 1] > rgb_thresh[1])\n & (input_img[:, :, 2] > rgb_thresh[2]))\n\n pixpts_nonzero = ((input_img[:, :, 0] > 0)\n & (input_img[:, :, 1] > 0)\n & (input_img[:, :, 2] > 0))\n\n # obstacle pixels are those non-zero pixels where rgb_thresh was not met\n obs_pixpts = np.logical_and(\n pixpts_nonzero, np.logical_not(pixpts_above_thresh)\n )\n # Index the array of zeros with the boolean array and set to 1\n # those pixels where ROI threshold was met\n nav_img[pixpts_above_thresh] = 1\n obs_img[obs_pixpts] = 1\n\n # Threshold the HSV image to get only colors for gold rock samples\n rock_img = cv2.inRange(hsv_img, low_bound, upp_bound)\n\n # Return the threshed binary images\n ThreshedImages = namedtuple('ThreshedImages', 'nav obs rock')\n thresh_imgs = ThreshedImages(nav_img, obs_img, rock_img)\n\n return thresh_imgs",
"def detect_edges(image, threshold):\n\ttarget = copy(image)\n\tblack = create_color(0,0,0)\n\twhite = create_color(255,255,255)\n\n\tfor y in range(1, get_height(image) - 1):\n\t\tfor x in range (1, get_width(image) - 1):\n\t\t\tcenter_red, center_green, center_blue = get_color(image, x, y)\n\t\t\tbelow_red, below_green, below_blue = get_color(image, x, y+1)\n\t\t\tright_red, right_green, right_blue = get_color(image, x+1, y)\n\n\n\t\t\tcenter_brightness = get_brightness(center_red, center_green,\n\t\t\t center_blue)\n\t\t\tbelow_brightness = get_brightness(below_red, below_green,\n\t\t\t below_blue)\n\t\t\tright_brightness = get_brightness(right_red, right_green,\n\t\t\t right_blue)\n\n\t\t\tif abs(center_brightness-below_brightness) > threshold or abs(\n\t\t\t center_brightness - right_brightness) > threshold:\n\t\t\t\tset_color(target, x, y, black)\n\t\t\telse:\n\t\t\t\tset_color(target, x, y, white)\n\tshow(target)",
"def color_threshold(img, tscheme='HSV', cmap='BGR', channel='S', thresh=None):\n assert thresh is not None, \"Must specify a threshold. See this function's help.\"\n assert cmap in ['BGR', 'RGB'], 'Invalid input color map, choose either BGR or RGB.'\n assert tscheme in ['HSV', 'RGB'], 'Invalid target color scheme, choose either HSV or RGB.'\n assert channel in ['R', 'G', 'B', 'H', 'S', 'V'], 'Invalid target channel for color map.'\n\n if cmap == 'BGR':\n if tscheme == 'HSV':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n elif tscheme == 'RGB':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n elif cmap == 'RGB':\n if tscheme == 'HSV':\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n if tscheme == 'HSV':\n ch1 = img[:,:,0] # Hue channel\n ch2 = img[:,:,1] # Saturation channel\n ch3 = img[:,:,2] # Value channel\n\n else:\n ch1 = img[:,:,0] # Red channel\n ch2 = img[:,:,1] # Green channel\n ch3 = img[:,:,2] # Blue channel\n\n channel_select = {'H': ch1, 'S': ch2, 'V': ch3, \n 'R': ch1, 'G': ch2, 'B': ch3}\n\n binary = np.zeros_like(ch3)\n thresh_min, thresh_max = thresh[0], thresh[1] \n binary[(channel_select[channel] >= thresh_min) & (channel_select[channel] <= thresh_max)] = 1\n\n # OpenCV's Morphological Transformations can help a lot with removing \n # unwanted noise. See https://goo.gl/XFznnv for details of how this works.\n kernel = np.ones((2,2),np.uint8)\n binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)\n\n return binary",
"def apply_threshold(heatmap, threshold):\n\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n\n # Return thresholded map\n return heatmap",
"def detect_red_lights_basic(image):\n b, g, r = cv2.split(image)\n bf = b.astype(np.float) / 255.0\n gf = g.astype(np.float) / 255.0\n rf = r.astype(np.float) / 255.0\n\n rif = rf * (1.0 - bf) * (1.0 - gf) * 2.0 * 256.0\n rif = np.clip(rif, 0.0, 255.0)\n ri8 = rif.astype(np.uint8)\n ri8th = np.zeros_like(ri8)\n rtreshold = 200\n ri8th[ri8 > rtreshold] = 255\n\n gif = gf * (1.0 - bf) * (1.0 - rf) * 2.0 * 256.0\n gif = np.clip(gif, 0.0, 255.0)\n gi8 = gif.astype(np.uint8)\n gi8th = np.zeros_like(gi8)\n gtreshold = 200\n gi8th[gi8 > gtreshold] = 255\n\n red_count = np.count_nonzero(ri8th)\n green_count = np.count_nonzero(gi8th)\n print(\"red:\", red_count, \"green:\", green_count)\n\n if red_count > 10 and red_count >= 0.8 * green_count:\n return True\n else:\n return False",
"def gradient_threshold(img, color_space='BGR'):\n\tif color_space == 'BGR':\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\telif color_space == 'RGB':\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\telse:\n\t\traise Exception('Color Space Error')\n\n\t# Sobel x\n\tsobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n\tabs_sobel_x = np.absolute(sobel_x) # Absolute x derivative to accentuate lines away from horizontal\n\tscaled_sobel = np.uint8(255 * abs_sobel_x / np.max(abs_sobel_x))\n\n\t# Threshold x gradient\n\tthresh_min = 20\n\tthresh_max = 100\n\tsobel_x_binary = np.zeros_like(scaled_sobel)\n\tsobel_x_binary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 255\n\t# cv2.imshow('', sobel_x_binary)\n\t# cv2.waitKey(10000)\n\treturn sobel_x_binary",
"def double_threshold(image, upper, lower):\n\n # create lists for pixel identification\n strong = []\n weak = []\n suppressed = []\n\n # https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.where.html \n # get the index values where the edge is greater than the upper bound\n strong = np.where(image > upper)\n\n # get the index values where the edge is greater than the lower bound but\n # less than the upper bound\n weak = np.where((image >= lower) & (image < upper))\n\n # get the index values where the edge is lower than the lower bound\n suppressed = np.where(image < lower)\n\n # set the suppressed index values to 0\n image[suppressed[0], suppressed[1]] = 0\n\n # set the weak values to lower bound of 45\n image[weak[0], weak[1]] = 45\n\n # set the weak values to upper bound of 45\n image[strong[0], strong[1]] = 255\n\n # write output to file\n out = OUT_FOLDER+\"/threshold.jpg\"\n cv2.imwrite(out, image)\n\n # return the matrix of edges, and indexes of strong and weak edges\n return image, weak, strong",
"def go_straight_until_intensity_is_greater_than(self, intensity, speed):\n self.left_motor.turn_on(speed)\n self.right_motor.turn_on(speed)\n while True:\n sensor = ev3.ColorSensor()\n if sensor.reflected_light_intensity >= intensity:\n print('OK')\n break\n self.left_motor.turn_off()\n self.right_motor.turn_off()",
"def _apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def colour_numeric(val: float, threshold: float = 0.95, colour: str = \"#FF2222\") -> str:\n if val < threshold:\n colour = \"black\"\n return \"color: %s\" % colour",
"def go_straight_until_intensity_is_less_than(self, intensity, speed):\n self.left_motor.turn_on(speed)\n self.right_motor.turn_on(speed)\n while True:\n sensor = ev3.ColorSensor()\n if sensor.reflected_light_intensity <= intensity:\n print('OK')\n break\n self.left_motor.turn_off()\n self.right_motor.turn_off()",
"def thresholding(self, thval=130):\n self.thval = thval\n self.temp_img[self.temp_img < thval] = thval",
"def brighten(val, minval):\n return minval + (255 - minval) * val // 255"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Forces an update of the batches no matter the current batch size. Prints errors if there are any.
|
def update_batches(self):
with self._commit_lock:
self._update_batches_force()
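A stripped-down, self-contained sketch of the pattern in this record's document (a lock-guarded forced flush); every name below is a hypothetical stand-in, not the real batcher class:

import threading

class _ToyBatcher:
    def __init__(self):
        self._commit_lock = threading.Lock()
        self._pending = []

    def _update_batches_force(self):
        # the real implementation prints errors; this toy just reports and clears the queue
        print(f"flushing {len(self._pending)} queued items")
        self._pending.clear()

    def update_batches(self):
        with self._commit_lock:  # serialize forced flushes with concurrent add() calls
            self._update_batches_force()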
|
[
"def _update_batch_size(self):\n candidate = None\n for op in self.operations:\n op_batch_size = getattr(op, \"batch_size\", None)\n if op_batch_size is None:\n continue\n if candidate:\n if op_batch_size != candidate:\n raise ValueError(\n \"The batch sizes of the quantum script operations do not match, they include \"\n f\"{candidate} and {op_batch_size}.\"\n )\n else:\n candidate = op_batch_size\n\n self._batch_size = candidate",
"def _update_batch_size(self):\n candidate = None\n for op in self.operations:\n op_batch_size = getattr(op, \"batch_size\", None)\n if op_batch_size is None:\n continue\n if candidate and op_batch_size != candidate:\n raise ValueError(\n \"The batch sizes of the tape operations do not match, they include \"\n f\"{candidate} and {op_batch_size}.\"\n )\n candidate = candidate or op_batch_size\n\n self._batch_size = candidate",
"def update(self, batch_size):\n self.weights = self.weights_update.update_params(self.weights, self.change_weights / batch_size) # update weights\n\n if self.use_bias:\n self.bias = self.bias_update.update_params(self.bias, self.change_bias / batch_size) # update biases",
"def update_size(self, size):\n self.batch_size_estimation = size\n self.trust_batch_estimation = True",
"def _bulk_updater(db, docit, batchsize=500, new_edits=True):\n for batch in _batchit(docit, batchsize):\n if new_edits:\n for (ok, docid, rev) in db.update(batch):\n yield str(ok), str(docid), str(rev)\n else:\n for error in db.update(batch, new_edits=False):\n yield error",
"def beginBatchChanges(self): \r\n self._batchChangeDepth += 1",
"def batch_size(self, new_batch_size):\n\n self._batch_size = int(new_batch_size)",
"def run(self, batch_size=100):\n q = self.get_query()\n entities = do_w_retry(q.fetch, batch_size)\n count = 0\n while entities:\n to_put = []\n to_delete = []\n for entity in entities:\n map_updates, map_deletes = self.map(entity)\n to_put.extend(map_updates)\n to_delete.extend(map_deletes)\n if to_delete:\n do_w_retry(db.delete, to_delete)\n if to_put:\n do_w_retry(db.put, to_put)\n q = do_w_retry(self.get_query)\n q.filter(\"__key__ >\", entities[-1].key())\n entities = do_w_retry(q.fetch, batch_size)\n count += 1\n logger.info(\"Handled batch %s...\" % count)",
"def test_get_batch_normalization_updates(self):\n with batch_normalization(self.mlp):\n y_bn = self.mlp.apply(self.x)\n graph = ComputationGraph([y_bn])\n updates = get_batch_normalization_updates(graph)\n self.simple_assertions(updates)",
"def step(self, number_of_batches):\n tmp_hash = self.Get_parameter_hash(self.Get_parameter_dict())\n if self._parameter_hash != tmp_hash:\n if self._print_warning:\n print('WARNING::---------------------------------------------')\n print('you are re-initializing a new solver which will delete')\n print('the weight history of the solver.')\n print('Only use this option if you know what you are doing')\n self._print_warning = False\n self._Update_solver()\n return self._solver.step(number_of_batches)",
"def batch_learning(self, batch_dir, batch_size=int(1.0e5), max_batches=int(1.0e3)):\n\n\t\tlearning_samples = 0\n\t\tbatches = self.deserialize_batches(batch_dir, batch_size, max_batches)\n\n\t\tfor i, batch in enumerate(batches):\n\t\t\tbatch_features, batch_results, batch_weights = batch\n\n\t\t\tif self.model_type == 'rfc':\n\t\t\t\tself.model.fit(batch_features, batch_results, sample_weight=batch_weights)\n\t\t\t\tself.model.n_estimators += 1\n\t\t\telif self.model_type == 'sgd':\n\t\t\t\tself.model.partial_fit(batch_features, batch_results,\n\t\t\t\t\t\t\t\t\t classes=[0,1], sample_weight=batch_weights)\n\t\t\telse:\n\t\t\t\tprint((f\"Error: A decision tree classifier can't use batch learning.\"\n\t\t\t\t\t f\" Use <tree_learning> or change the ChessPipeline's model type.\"))\n\t\t\t\tbreak\n\n\t\t\tlearning_samples += len(batch_features)\n\t\t\tself.save_model()\n\t\t\tprint((f\"Batch {i + 1} learning complete. Number of chess positions \"\n\t\t\t\t f\"used in batch learning so far: {learning_samples}.\"))",
"def test_main_progress_bar_update_amount(\n tmpdir, train_batches: int, val_batches: int, refresh_rate: int, train_deltas: list, val_deltas: list\n):\n model = BoringModel()\n progress_bar = MockedUpdateProgressBars(refresh_rate=refresh_rate)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=train_batches,\n limit_val_batches=val_batches,\n callbacks=[progress_bar],\n logger=False,\n enable_checkpointing=False,\n )\n trainer.fit(model)\n if train_batches > 0:\n progress_bar.main_progress_bar.update.assert_has_calls([call(delta) for delta in train_deltas])\n if val_batches > 0:\n progress_bar.val_progress_bar.update.assert_has_calls([call(delta) for delta in val_deltas])",
"def pre_batch(self, batch_size):\n self.results = []",
"def test_autoscaling_group_update_replace_huge_batch_size(self):\r\n updt_template = json.loads(asg_tmpl_with_updt_policy)\r\n group = updt_template['Resources']['WebServerGroup']\r\n policy = group['UpdatePolicy']['AutoScalingRollingUpdate']\r\n policy['MinInstancesInService'] = '0'\r\n policy['MaxBatchSize'] = '20'\r\n config = updt_template['Resources']['LaunchConfig']\r\n update_image = 'F17-x86_64-cfntools'\r\n config['Properties']['ImageId'] = update_image\r\n\r\n self.update_autoscaling_group(asg_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=10,\r\n num_creates_expected_on_updt=0,\r\n num_deletes_expected_on_updt=0,\r\n num_reloads_expected_on_updt=3,\r\n update_replace=True,\r\n update_image_id=update_image)",
"def set_batch_size(self, batch_size):\r\n\r\n self.batch_size = batch_size",
"def run(self, batch_size=100):\n q = self.get_query()\n entities = q.fetch(batch_size)\n while entities:\n to_put = []\n to_delete = []\n for entity in entities:\n map_updates, map_deletes = self.map(entity)\n to_put.extend(map_updates)\n to_delete.extend(map_deletes)\n if to_put:\n db.put(to_put)\n logging.info('entities written: %d' % len(to_put))\n if to_delete:\n db.delete(to_delete)\n logging.info('entities deleted: %d' % len(to_delete))\n q = self.get_query()\n q.filter(\"__key__ >\", entities[-1].key())\n entities = q.fetch(batch_size)",
"def set_batch_size(self, batch_size=100):\n self._batch_size = batch_size",
"def endBatchChanges(self): \r\n self._batchChangeDepth -= 1\r\n self.notifyObserversIfChanged()",
"def handle_updates(self):\r\n # For grouping same model classes for efficiency.\r\n updates = {}\r\n previous_path = None\r\n current_index = None\r\n\r\n for obj_identifier in self.actions['update']:\r\n (object_path, pk) = self.split_obj_identifier(obj_identifier)\r\n\r\n if object_path is None or pk is None:\r\n self.log.error(\"Skipping.\")\r\n continue\r\n\r\n if object_path not in updates:\r\n updates[object_path] = []\r\n\r\n updates[object_path].append(pk)\r\n\r\n # We've got all updates grouped. Process them.\r\n for object_path, pks in updates.items():\r\n model_class = self.get_model_class(object_path)\r\n\r\n if object_path != previous_path:\r\n previous_path = object_path\r\n current_index = self.get_index(model_class)\r\n\r\n if not current_index:\r\n self.log.error(\"Skipping.\")\r\n continue\r\n\r\n instances = [self.get_instance(model_class, pk) for pk in pks]\r\n\r\n # Filter out what we didn't find.\r\n instances = [instance for instance in instances if instance is not None]\r\n\r\n # Update the batch of instances for this class.\r\n # Use the backend instead of the index because we can batch the\r\n # instances.\r\n total = len(instances)\r\n self.log.debug(\"Indexing %d %s.\" % (total, object_path))\r\n\r\n for start in range(0, total, self.batchsize):\r\n end = min(start + self.batchsize, total)\r\n batch_instances = instances[start:end]\r\n\r\n self.log.debug(\" indexing %s - %d of %d.\" % (start+1, end, total))\r\n current_index._get_backend(self.using).update(current_index, batch_instances)\r\n\r\n for updated in batch_instances:\r\n self.processed_updates.add(\"%s.%s\" % (object_path, updated.pk))\r\n\r\n self.log.debug(\"Updated objects for '%s': %s\" % (object_path, \", \".join(pks)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tries to resubmit failed submissions.
|
def _retry_failed_submissions(self):
still_failing = []
for create_func, batch_data in self._submission_fails:
try:
self._submit_batches.submit_update(create_func, batch_data)
except SubmitBatchesException:
still_failing.append((create_func, batch_data))
if self._print_verbose_activated:
if len(self._submission_fails) > 0:
print("Of", len(self._submission_fails), "/", len(still_failing),
"are still failing.")
self._submission_fails = still_failing
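A self-contained sketch of the same retry-and-collect pattern, with generic names standing in for _submit_batches.submit_update and SubmitBatchesException (both names are assumptions here):

def retry_failed(submit, failures, verbose=False):
    # re-attempt each (create_func, batch_data) pair and keep the ones that still fail
    still_failing = []
    for create_func, batch_data in failures:
        try:
            submit(create_func, batch_data)
        except Exception:  # stand-in for SubmitBatchesException
            still_failing.append((create_func, batch_data))
    if verbose and failures:
        print(f"Of {len(failures)} retried submissions, {len(still_failing)} are still failing.")
    return still_failing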
|
[
"def resubmit(self):\n self.id = None\n self.submit()",
"def _handle_submission_failure(self, calculation):\n self.abort_nowait('submission failed for the {} in iteration {}, but error handling is not implemented yet'\n .format(SiestaCalculation.__name__, self.ctx.iteration))",
"def resubmit_jobs(output, failed, missing, verbose):\n event_file = os.path.join(output, \"submit_jobs_events.log\")\n setup_event_logging(event_file, mode=\"a\")\n filename = os.path.join(output, \"submit_jobs.log\")\n level = logging.DEBUG if verbose else logging.INFO\n setup_logging(__name__, filename, file_level=level, console_level=level, mode=\"a\")\n\n cluster, promoted = Cluster.deserialize(\n output,\n try_promote_to_submitter=True,\n deserialize_jobs=True,\n )\n if not cluster.is_complete():\n cluster.demote_from_submitter()\n print(\"resubmit-jobs requires that the existing submission be complete\", file=sys.stderr)\n sys.exit(1)\n assert promoted\n\n jobs_to_resubmit = _get_jobs_to_resubmit(cluster, output, failed, missing)\n updated_blocking_jobs_by_name = _update_with_blocking_jobs(jobs_to_resubmit, output)\n _reset_results(output, jobs_to_resubmit)\n cluster.prepare_for_resubmission(jobs_to_resubmit, updated_blocking_jobs_by_name)\n\n ret = 1\n try:\n mgr = JobSubmitter.load(output)\n status = mgr.submit_jobs(cluster)\n if status == Status.IN_PROGRESS:\n print(f\"Resubmitted {len(jobs_to_resubmit)} jobs in {output}\")\n ret = 0\n else:\n ret = status.value\n except Exception:\n logger.exception(\"Failed to resubmit jobs\")\n raise\n finally:\n cluster.demote_from_submitter()\n\n sys.exit(ret)",
"def test_async_submit_exception(self):\n self.fake_api_backend._api_client = JobSubmitFailClient(max_fail_count=1)\n\n job_set = self._jm.run([self._qc]*2, backend=self.fake_api_backend,\n max_experiments_per_job=1)\n self.assertTrue(any(job is None for job in job_set.jobs()))\n self.assertTrue(any(job is not None for job in job_set.jobs()))\n\n # Make sure results() and statuses() don't fail\n job_set.results()\n job_set.statuses()",
"def _handleSubmitFailedJobs(self, badJobs, exitCode):\n fwjrBinds = []\n for job in badJobs:\n job['couch_record'] = None\n job['fwjr'] = Report()\n if exitCode in [71102, 71103]:\n job['fwjr'].addError(\"JobSubmit\", exitCode, \"SubmitFailed\",\n WM_JOB_ERROR_CODES[exitCode] + ', '.join(job['possibleSites']),\n ', '.join(job['possibleSites']))\n elif exitCode in [71101]:\n # there is no possible site\n if job.get(\"fileLocations\"):\n job['fwjr'].addError(\"JobSubmit\", exitCode, \"SubmitFailed\", WM_JOB_ERROR_CODES[exitCode] +\n \": file locations: \" + ', '.join(job['fileLocations']) +\n \": site white list: \" + ', '.join(job['siteWhitelist']) +\n \": site black list: \" + ', '.join(job['siteBlacklist']))\n else:\n job['fwjr'].addError(\"JobSubmit\", exitCode, \"SubmitFailed\",\n WM_JOB_ERROR_CODES[exitCode] + ', and empty fileLocations')\n\n else:\n job['fwjr'].addError(\"JobSubmit\", exitCode, \"SubmitFailed\", WM_JOB_ERROR_CODES[exitCode])\n\n fwjrPath = os.path.join(job['cache_dir'], 'Report.%d.pkl' % int(job['retry_count']))\n job['fwjr'].setJobID(job['id'])\n try:\n job['fwjr'].save(fwjrPath)\n fwjrBinds.append({\"jobid\": job[\"id\"], \"fwjrpath\": fwjrPath})\n except IOError as ioer:\n logging.error(\"Failed to write FWJR for submit failed job %d, message: %s\", job['id'], str(ioer))\n self.changeState.propagate(badJobs, \"submitfailed\", \"created\")\n self.setFWJRPathAction.execute(binds=fwjrBinds)\n return",
"def test_broken_save(self):\n\n original_xml = self.get_xml('original')\n edit_xml = self.get_xml('edit')\n\n result = submit_form_locally(original_xml, self.domain)\n xform = result.xform\n self.assertEqual(self.ID, xform.form_id)\n self.assertTrue(xform.is_normal)\n self.assertEqual(self.domain, xform.domain)\n\n self.assertEqual(\n UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(),\n 0\n )\n\n with patch.object(self.interface.processor, 'save_processed_models', side_effect=HTTPError):\n with self.assertRaises(HTTPError):\n submit_form_locally(edit_xml, self.domain)\n\n xform = self.formdb.get_form(self.ID)\n self.assertIsNotNone(xform)\n # it didn't go through, so make sure there are no edits still\n self.assertIsNone(getattr(xform, 'deprecated_form_id', None))\n self.assertEqual(UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(), 0)",
"def re_run_submission(request, submission_pk):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n response_data = {\n \"error\": \"Submission {} does not exist\".format(submission_pk)\n }\n return Response(response_data, status=status.HTTP_404_NOT_FOUND)\n\n if submission.ignore_submission:\n response_data = {\n \"error\": \"Deleted submissions can't be re-run\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n # get the challenge and challenge phase object\n challenge_phase = submission.challenge_phase\n challenge = challenge_phase.challenge\n\n if not challenge.allow_participants_resubmissions and not is_user_a_host_of_challenge(request.user, challenge.pk):\n response_data = {\n \"error\": \"Only challenge hosts are allowed to re-run a submission\"\n }\n return Response(response_data, status=status.HTTP_403_FORBIDDEN)\n\n if not challenge.is_active:\n response_data = {\n \"error\": \"Challenge {} is not active\".format(challenge.title)\n }\n return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)\n\n message = handle_submission_rerun(submission, Submission.CANCELLED)\n publish_submission_message(message)\n response_data = {\n \"success\": \"Submission is successfully submitted for re-running\"\n }\n return Response(response_data, status=status.HTTP_200_OK)",
"def test_problem_batch_submit_error(self):\n\n job = models.ProblemJob(\n data=self.problem_data,\n params=dict(non_existing_param=1),\n solver=self.solver_id,\n type=self.problem_type,\n )\n\n statuses = self.api.submit_problems([job])\n\n self.assertIsInstance(statuses, list)\n self.assertEqual(len(statuses), 1)\n\n for status in statuses:\n self.assertIsInstance(status, models.ProblemSubmitError)\n self.assertEqual(status.error_code, 400)",
"def test_save_failed(self):\n tasks.raise_exception()\n with run_kuyruk(save_failed_tasks=True) as worker:\n worker.expect('ZeroDivisionError')\n worker.expect('No retry left')\n worker.expect('Saving failed task')\n worker.expect('Saved')\n worker.expect('Task is processed')\n\n assert is_empty('kuyruk')\n r = redis.StrictRedis()\n assert r.hvals('failed_tasks')\n\n run_requeue()\n assert not r.hvals('failed_tasks')\n assert not is_empty('kuyruk')",
"def test_04_reject_resubmit(self):\n\n # make the journal to update\n journal = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))\n journal.set_id(\"123456789987654321\") # this id is the one the UR fixture uses for current_journal\n journal.save(blocking=True)\n\n acc = models.Account()\n acc.set_id(\"testadmin\")\n acc.set_role(\"admin\")\n acc.save(blocking=True)\n ctx = self._make_and_push_test_context(acc=acc)\n\n pub = models.Account()\n pub.set_id(\"publisher\")\n pub.set_email(\"publisher@example.com\")\n pub.save(blocking=True)\n\n # create an update request\n ur = models.Application(**UPDATE_REQUEST_SOURCE)\n ur.bibjson().publication_time_weeks = 1\n formulaic_context = ApplicationFormFactory.context(\"update_request\")\n fc = formulaic_context.processor(source=ur)\n fc.finalise()\n\n # get a handle on the update request\n ur = fc.target\n\n # reject that update request\n admin_context = ApplicationFormFactory.context(\"admin\")\n afc = admin_context.processor(source=ur)\n afc.form.application_status.data = constants.APPLICATION_STATUS_REJECTED\n afc.finalise(account=acc)\n\n # now make a new UR and process that to completion, expecting nothing to go awry\n ur = models.Application(**UPDATE_REQUEST_SOURCE)\n ur.bibjson().publication_time_weeks = 2\n formulaic_context = ApplicationFormFactory.context(\"update_request\")\n fc = formulaic_context.processor(source=ur)\n fc.finalise()\n\n ur = fc.target\n time.sleep(1)\n\n # accept the update request\n ur = models.Application.pull(ur.id)\n acfc = admin_context.processor(source=ur)\n acfc.form.application_status.data = constants.APPLICATION_STATUS_ACCEPTED\n afc.finalise(account=acc)\n\n # check that we only have one journal\n time.sleep(1)\n all = models.Journal.all()\n assert len(all) == 1\n assert all[0].bibjson().publication_time_weeks == 2",
"def _send_response(self):\n for identifier in self._identifiers:\n if identifier in self._responses and self._was_updated(identifier):\n response = requests.post(self._submit_url, {\n \"identifier\": identifier,\n \"api_key\": self._api_key,\n \"notebook\": str(self._notebook),\n \"response\": str(self._responses[identifier]),\n })\n assert response.text != \"SUBMISSION UNSUCCESSFUL\" and response.text == \"SUBMISSION SUCCESSFUL\", \\\n \"submission was not sent successfully\"\n self._updated_since_last_post[identifier] = False",
"def postHarvestError(self):\n if self.stopped or self.mode == 'TEST':\n return\n self.setStatus(self.__status, \"batch number \" + self.harvestInfo['batch_number'] + \" completed with error:\" + str.strip(self.errorLog))\n postRequest = Request(self.harvestInfo['response_url'] + \"?ds_id=\" + str(self.harvestInfo['data_source_id'])\n + \"&batch_id=\" + self.harvestInfo['batch_number'] + \"&status=\" + self.__status)\n self.logger.logMessage(\"ERROR URL:\" + postRequest.getURL(), \"INFO\")\n self.data = postRequest.postCompleted()\n del postRequest",
"def reject(self) -> None:\n\n assert self.state == 'submitted'\n self.state = 'rejected'",
"def fail_job(self, jobid, reason):",
"def fix_failures(self):\n db = self.db\n tasks = db(db.task_scheduled.status==RUNNING).select()\n ids = [task.id for task in tasks if \\\n task.last_run_time+timedelta(seconds=task.timeout) \\\n <datetime.now()]\n db(db.task_scheduled.id.belongs(ids)).update(status=OVERDUE)\n db(db.task_scheduled.status==QUEUED).update(assigned_worker_name=None)\n db.commit()",
"def finish(self):\n for action in self._pending_actions:\n if not action.is_done:\n action.done('failed')\n self._pending_actions = []",
"def fail(self):\n if self._check_notified():\n return\n self.result_synchronizer.notify((\"fail\",None))",
"def _postponeError(self, failure=None, rethrow=True):\n if failure is None:\n failure = tw_failure.Failure()\n self.postponedErrors.append(failure)\n if rethrow:\n failure.raiseException()",
"def badAttempt(self):\n self.attempts += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add one object or reference to this batcher based on the arguments passed.
|
def add(self, **kwargs: dict):
# all keys are mandatory for references
        reference_keys = {'from_object_uuid', 'from_object_class_name', 'from_property_name',
                          'to_object_uuid'}
if kwargs.keys() == reference_keys:
with self._commit_lock:
self._last_update = time.time()
self._reference_batch.add(**kwargs)
self._update_batch_if_necessary()
return
# only mandatory keys
object_keys = set(['data_object', 'class_name'])
all_object_keys = set(['data_object', 'class_name', 'uuid', 'vector'])
if (not object_keys - kwargs.keys()) and set(kwargs).issubset(all_object_keys):
with self._commit_lock:
self._last_update = time.time()
self._objects_batch.add(**kwargs)
self._update_batch_if_necessary()
return
raise TypeError("Wrong arguments for adding data to batcher!\n"
f"Accepted arguments for references: {reference_keys}\n"
f"Accepted arguments for objects: {all_object_keys}! 'uuid' and 'vector' - optional\n")
|
[
"def add(self, *args, **kwargs):\n kw = copy.deepcopy(self.standard)\n kw.update(kwargs)\n self.append(self._base(*args, **kw))",
"def add(self, *args, **kwargs):\n nkwargs = kwargs.copy()\n nkwargs['addTo'] = self\n return self._maker.bind(*args, **nkwargs)",
"def add(self, *args, **kwargs):\n self.instances[0].add(*args, **kwargs)\n if self.instances[0].element_count() > self.max_elements:\n self.__add_instance()\n if len(self.instances) > self.max_instances:\n self.instances.pop()",
"def add(self, reference, outputs):\n raise NotImplementedError()",
"def add(uid, obj):",
"def add(self, obj):\n model_class = type(obj)\n model_key = model_class._meta.label\n self._create_queues[model_key].append(obj)\n if len(self._create_queues[model_key]) >= self.chunk_size:\n if not self._commit(model_class):\n print('Error entre %s -> %s' % (self.actual, self.actual + self.chunk_size))\n self.actual += self.chunk_size\n print(self.actual)",
"def append(self, object) :\n self.objects.append(object)",
"def __add__(self, other):\n return_bag = type(self)(self)\n #evaluates type then calls it w/self as a param linkedbag(self)\n for i in other:\n return_bag.add(i)\n return return_bag #returns an object",
"def add(self, arguments: Set[Argument]):\n self.arguments.update(arguments)\n for child in self.children:\n child.add(arguments)",
"def addItem(self, item: BatchItem):\n self.items.append(item)",
"def add(self, item):\n pass",
"def addItem(self, first_name: str, last_name: str, ID=\"unassigned\"):\n super().addItem(self.OriginBatchItem(first_name, last_name, ID))",
"def __add__(self, other):\n if isinstance(other, (Treant, Bundle)):\n return Bundle(self, other)\n else:\n raise TypeError(\"Operands must be Treants or Bundles.\")",
"def __call__(self, obj):\n self._buffer.append(obj)\n self._counter.count()\n if len(self._buffer) >= self._batch_size:\n self.flush()",
"def add(self, *args: Union[Transaction, TransactionInstruction]) -> Transaction:\n for arg in args:\n if isinstance(arg, Transaction):\n self.instructions.extend(arg.instructions)\n elif isinstance(arg, TransactionInstruction):\n self.instructions.append(arg)\n else:\n raise ValueError(\"invalid instruction:\", arg)\n\n return self",
"def addReference(self, ref1, ref2):\n \n idref1 = self.addObject( ref1 )\n idref2 = self.addObject( ref2 )\n \n if idref1 not in self.references:\n self.references[ idref1 ] = [ idref2, ]\n else:\n if idref2 not in self.references[ idref1 ]:\n self.references[ idref1 ].append( idref2 )",
"def __add__(self, other:Any):\r\n other = other if hasattr(other, '__iter__') else [other]\r\n return type(self)([*self, *other])",
"def __add__(self, other: Union[list[Coreg], Coreg, CoregPipeline]) -> CoregPipeline:\n if not isinstance(other, Coreg):\n other = list(other)\n else:\n other = [other]\n\n pipelines = self.pipeline + other\n\n return CoregPipeline(pipelines)",
"def add(self, *args):\n for arg in args:\n if isinstance(arg, tuple) or isinstance(arg, list):\n # got a list of things, add them one by one\n self.add(*item)\n \n if isinstance(arg, BaseOrganism):\n # add single organism\n self.organisms.append(arg)\n \n elif isinstance(arg, Population):\n # absorb entire population\n self.organisms.extend(arg)\n else:\n raise TypeError(\n \"can only add Organism or Population objects\")\n \n self.sorted = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Closes this Batcher. Makes sure that all unfinished batches are loaded into weaviate. Batcher is not useable after closing.
|
def close(self):
# stop watchdog thread
if self._auto_commit_watchdog is not None:
with self._commit_lock:
self._auto_commit_watchdog.is_closed = True
retry_counter = 0
while len(self._objects_batch) > 0 or len(self._reference_batch) > 0 or\
len(self._submission_fails) > 0:
            # update_batches might hit a connection error; just retry until it succeeds
self.update_batches()
retry_counter += 1
if retry_counter > 500:
print("CRITICAL ERROR objects can not be updated exit after 500 retries")
sys.exit(5)
self._reference_batch = None
self._objects_batch = None
self._client = None
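A runnable sketch of the bounded drain loop in close(); flush and pending are generic stand-ins for update_batches() and the internal batches, so the names are assumptions:

import sys

def drain(flush, pending, max_retries=500):
    # keep flushing until the queue is empty, mirroring the retry loop in close()
    retries = 0
    while pending:
        flush(pending)  # a flush may leave items behind, e.g. on connection errors
        retries += 1
        if retries > max_retries:
            print("CRITICAL ERROR: items could not be flushed; giving up after", max_retries, "retries")
            sys.exit(5)

# drain(lambda q: q.clear(), ["obj-1", "obj-2"])  # trivial flush that empties the queue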
|
[
"def close(self) -> None:\n self._close()\n if hasattr(self, '_iterator'):\n delattr(self, '_iterator')\n self._fire_listeners(EventType.CLOSE)",
"def end_batch(self) -> None:\n self.handle(events.EndBatch())",
"def close(self):\n iter_close(self._response_iter)",
"def close(self):\n self._jobboard.close()",
"def close(self):\n if self.parameters.multiprocesses:\n global main_pool\n main_pool.close()\n main_pool.join()\n gc.collect()",
"def Close(self):\n if not self.db_connection:\n if self.count != 0:\n raise exceptions.BatchSqlManagerException(\n \"Server-side Internal Error\"\n \" - db connection has been closed,\"\n \" while not all Sql statements are committed.\")\n return # already closed.\n\n self.logger.debug(\"Close...\")\n self.ExecuteRemaining()\n self.logger.debug(\"Close done.\")\n self.db_connection = None\n self.logger = None",
"def __exit__(self, *args: Any) -> None:\n self.close()",
"def close(self):\n \n for l in self._listeners.itervalues():\n l.close()\n \n self._log.info(\"Exiting gateway...\")\n logging.shutdown()",
"def close(self) -> None:\n if hasattr(self, '_wandb'):\n self._wandb.join()",
"def close(self) -> None:\n if self.lineageFetcher:\n self.lineageFetcher.close()",
"def __exit__(self, *args):\r\n\t\tself.io_buffer.close()",
"def close(self):\n self.__requests_session.close()",
"def close(self) -> None:\n utils.remove_handlers_from_logger(self.logger)\n self.metrics.close()",
"def close_all(cls):\n for mission in cls.active_missions:\n mission.finish()\n mission.stop_running()",
"def close(self):\n for f in self._opened_here:\n f.close()\n self._files = []\n self._opened_here = []",
"def close(self):\n self.flush()\n self.f.close()",
"def close(self):\n for f in self.fs: f.close()",
"def close_files(self):\n\t\tpass",
"def close(self):\n with self._lock:\n if self._closing:\n return\n self._closing = True\n if not self._workers:\n self._closed.set()\n return\n self._queue.put_nowait(self._PoolClosing)\n self._closed.wait()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the (scaled) coincidence.
|
def _coincidence(x, y):
coincidence = (x * y).sum()
if scaled:
# Handle division by zero error
denom = x.sum() * y.sum()
if denom == 0:
coincidence = np.nan
else:
coincidence /= denom
return coincidence
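A small worked example of the formula above (note that `scaled` is read from the enclosing scope rather than passed as an argument); here it is assumed to be True:

import numpy as np

x = np.array([1, 0, 1])
y = np.array([1, 1, 0])
raw = (x * y).sum()                       # 1: the vectors coincide at one position
scaled_value = raw / (x.sum() * y.sum())  # 1 / (2 * 2) = 0.25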
|
[
"def coincidence(self):\n self.S2sCoin = []\n for i in range(self.NbS2Peaks):\n coin = 0\n for idx in range(self.nchs):\n if (self.S2s[idx][self.S2s_Key[i]] > 0.5): # TODO: different size for S1 and S2?\n coin += 1\n self.S2sCoin.append(coin)\n\n self.S1sCoin = []\n for i in range(self.NbS1Peaks):\n coin = 0\n for idx in range(self.nchs):\n if (self.S1s[idx][self.S1s_Key[i]] > 0.5):\n coin += 1\n self.S1sCoin.append(coin)",
"def coherence(self):\n if np.any(self.m < 50):\n simon(\"Number of segments used in averaging is \"\n \"significantly low. The result might not follow the \"\n \"expected statistical distributions.\")\n\n # Calculate average coherence\n unnorm_power_avg = self.unnorm_power\n\n num = np.absolute(unnorm_power_avg) ** 2\n\n # The normalization was 'none'!\n unnorm_powers_avg_1 = self.pds1.power.real\n unnorm_powers_avg_2 = self.pds2.power.real\n\n coh = num / (unnorm_powers_avg_1 * unnorm_powers_avg_2)\n\n # Calculate uncertainty\n uncertainty = \\\n (2 ** 0.5 * coh * (1 - coh)) / (np.abs(coh) * self.m ** 0.5)\n\n return (coh, uncertainty)",
"def calc(self, sid):\n\n\t\tchar_collocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\t\tnoun_collocates = collocates_manager.get(sid, tpe='noun')\n\n\t\t# TODO: Figure out which order ir better here.\n\t\td1 = Probability.gen_dist(noun_collocates, smooth=False)\n\t\td2 = Probability.gen_dist(char_collocates, smooth=False)\n\n\t\treturn Probability.kl_divergence(d1, d2)",
"def c_for_benefit_score(pairs):\n \n # mapping pair outcomes to benefit\n obs_benefit_dict = {\n (0, 0): 0,\n (0, 1): -1,\n (1, 0): 1,\n (1, 1): 0,\n }\n \n # compute observed benefit for each pair\n obs_benefit = [obs_benefit_dict[(i[1], j[1])] for (i, j) in pairs]\n\n # compute average predicted benefit for each pair\n pred_benefit = [np.mean([i[0], j[0]]) for (i, j) in pairs]\n\n concordant_count, permissible_count, risk_tie_count = 0, 0, 0\n\n # iterate over pairs of pairs\n for i in range(len(pairs)):\n for j in range(i + 1, len(pairs)):\n \n # if the observed benefit is different, increment permissible count\n if obs_benefit[i] != obs_benefit[j]:\n\n # increment count of permissible pairs\n permissible_count = permissible_count +1\n \n # if concordant, increment count\n \n if ((pred_benefit[i] > pred_benefit[j] and obs_benefit[i] > obs_benefit[j]) or (pred_benefit[i] < pred_benefit[j] and obs_benefit[i] < obs_benefit[j])): # change to check for concordance\n \n concordant_count = concordant_count + 1\n\n # if risk tie, increment count\n if (pred_benefit[i] == pred_benefit[j]): #change to check for risk ties\n risk_tie_count = risk_tie_count + 1\n\n\n # compute c-statistic-for-benefit\n cstat = (concordant_count + (0.5 * risk_tie_count)) / permissible_count\n\n return cstat",
"def _calc_matching_prob(self):\n if not self.professional:\n return 1",
"def get_absolute_semantic_population_diversity(self):\n total_semantic_pairwise_distance = 0\n count = 0\n for i in range(self.population_size):\n for j in range(i + 1, self.population_size):\n bitvector_1 = self.correctly_predicted_bitvectors[i]\n bitvector_2 = self.correctly_predicted_bitvectors[j]\n semantic_distance = 0\n for k in range(len(bitvector_1)):\n if bitvector_1[k] != bitvector_2[k]:\n semantic_distance += 1\n total_semantic_pairwise_distance += semantic_distance\n count += 1\n return total_semantic_pairwise_distance / count",
"def calc(self, sid):\n\n\t\tchar_collocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\t\tnoun_collocates = collocates_manager.get(sid, tpe='noun')\n\n\t\td1 = Probability.gen_dist(char_collocates, smooth=False)\n\t\td2 = Probability.gen_dist(noun_collocates, smooth=False)\n\n\t\treturn Probability.total_variation(d1, d2)",
"def seniority(self):\n s = sum(map(abs,self.occ['alpha'] - self.occ['beta']))\n return s",
"def clustering_coefficient(self):\n res = [] # list for storing ratios for each vertex\n edges = set(edge for sublist in self.edges() for edge in sublist)\n for v in self.vertices(): # iterate through all vertices\n out = self.out_vertices(v) # store the out vertices\n k_v = len(out) # len of out vertices is number of neighbours \n # compute the maximum number of neighbour connectedness:\n most = float(k_v) * (k_v - 1.0) / 2.0 \n actual = 0 # initiate count of actual neighbour connectedness\n # iterate through all possible combinations of vs neighbours:\n for w in out:\n for x in out: \n # If the Edge between the neighbour pair exists,\n # incresae the actual count by 1:\n if Edge(w, x) in edges or Edge(x, w) in edges:\n actual += 1\n # calculate vs ratio as actual divided by most:\n ratio = float(actual) / most\n res.append(ratio) # append to list of ratios\n return np.average(res) # use numpy to compute the mean ratio",
"def relative_different(self):\n I = self.absolute_different()\n J = self.ideal(self.base_field().absolute_different().gens())\n return I/J",
"def information_gain(self, subset, attr):\n perfect = 0.\n total = 0.\n for val in self.values[attr]:\n counts = self.value_counts(subset, attr, val)\n total += sum(counts.values())\n if len(counts) == 1:\n # Only one dependent value found; perfect classification\n perfect += counts.values()[0]\n counts = self.attr_counts\n return perfect / total",
"def compute_compositionality(self):\n #pdb.set_trace()\n compositionality = 0\n comparisons = 0\n meanings = self.meaning_space.meanings()\n for meaning1,meaning2 in itertools.combinations(meanings, 2):\n mdist = self.meaning_space.hamming(meaning1,meaning2)\n signals1 = self.speak(meaning1, pick=False)\n signals2 = self.speak(meaning2, pick=False)\n for signal1 in signals1:\n for signal2 in signals2:\n sdist = self.signal_space.hamming(signal1,signal2)\n compositionality += ((mdist * sdist) / (len(signals1) * len(signals2)))\n comparisons += 1\n #pdb.set_trace() \n return (compositionality/comparisons)",
"def correspondences(labels1,labels2):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo)\n result = array([result//q,result%q])\n return result",
"def convexity_correction(self) -> float:\n raise NotImplementedError",
"def toss_once(self):\n for i in range(N):\n if self.picked_coin == \"normal\":\n random_num = round(random.uniform(0,1))\n if random_num == 1:\n self.counts[self.assumption] += 1\n elif self.picked_coin == \"biased\":\n self.p_given_assumption += 1\n return self.p_given_assumption/self.counts[self.assumption]",
"def calculateSimilarity(self, v1, v2):\n pass",
"def find_cc(theoreticalData, experimentalData):\n A=numpy.array(theoreticalData)\n B=numpy.array(experimentalData) \n cc= ( numpy.inner(A,B)/ ( numpy.linalg.norm(A)*numpy.linalg.norm(B) ) )\n print \"Cross-correlation: %5.3f\"%cc",
"def uniqness_measure(masks_predictions):\n #amp_all_cdist =cdist(all_amp_layer_weights, all_amp_layer_weights)\n sum_all_cdist =(cdist(masks_predictions, masks_predictions)).sum(axis=1)\n sum_all_cdist = normalize(sum_all_cdist)\n return sum_all_cdist",
"def get_overlap():\n proposed = 0\n for i in range(0,13):\n proposed += proposed_amounts[i] * staggering[i]\n return round(total - proposed - 100, 2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compare detected events across channels. See full documentation in the methods of SpindlesResults and SWResults.
|
def compare_channels(self, score="f1", max_distance_sec=0):
from itertools import product
assert score in ["f1", "precision", "recall"], f"Invalid scoring metric: {score}"
# Extract events and channel
detected = self.summary()
chan = detected["Channel"].unique()
        # Get indices of start in deciseconds, rounding to the nearest decisecond (100 ms).
# This is needed for three reasons:
# 1. Speed up the for loop
# 2. Avoid memory error in yasa.compare_detection
# 3. Make sure that max_distance works even when self and other have different sf.
# TODO: Only the Start of the event is currently supported. Add more flexibility?
detected["Start"] = (detected["Start"] * 10).round().astype(int)
max_distance = int(10 * max_distance_sec)
# Initialize output dataframe / dict
scores = pd.DataFrame(index=chan, columns=chan, dtype=float)
scores.index.name = "Channel"
scores.columns.name = "Channel"
pairs = list(product(chan, repeat=2))
# Loop across pair of channels
for c_index, c_col in pairs:
idx_chan1 = detected[detected["Channel"] == c_index]["Start"]
idx_chan2 = detected[detected["Channel"] == c_col]["Start"]
# DANGER: Note how we invert idx_chan2 and idx_chan1 here. This is because
# idx_chan1 (the index of the dataframe) should be the ground-truth.
res = compare_detection(idx_chan2, idx_chan1, max_distance)
scores.loc[c_index, c_col] = res[score]
return scores
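A hypothetical call, assuming sp is a SpindlesResults-like object exposing the method above; per the comment in the loop, the row (index) channel is treated as the ground truth:

scores = sp.compare_channels(score="f1", max_distance_sec=0.1)
print(scores.round(2))  # square channel-by-channel matrix; the diagonal is 1.0 whenever a channel has events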
|
[
"def compare_detection(self, other, max_distance_sec=0, other_is_groundtruth=True):\n detected = self.summary()\n if isinstance(other, (SpindlesResults, SWResults, REMResults)):\n groundtruth = other.summary()\n elif isinstance(other, pd.DataFrame):\n assert \"Start\" in other.columns\n assert \"Channel\" in other.columns\n groundtruth = other[[\"Start\", \"Channel\"]].copy()\n else:\n raise ValueError(\n f\"Invalid argument other: {other}. It must be a YASA detection output or a Pandas \"\n f\"DataFrame with the columns Start and Channels\"\n )\n\n # Get indices of start in deciseconds, rounding to nearest deciseconds (100 ms).\n # This is needed for three reasons:\n # 1. Speed up the for loop\n # 2. Avoid memory error in yasa.compare_detection\n # 3. Make sure that max_distance works even when self and other have different sf.\n detected[\"Start\"] = (detected[\"Start\"] * 10).round().astype(int)\n groundtruth[\"Start\"] = (groundtruth[\"Start\"] * 10).round().astype(int)\n max_distance = int(10 * max_distance_sec)\n\n # Find channels that are present in both self and other\n chan_detected = detected[\"Channel\"].unique()\n chan_groundtruth = groundtruth[\"Channel\"].unique()\n chan_both = np.intersect1d(chan_detected, chan_groundtruth) # Sort\n\n if not len(chan_both):\n raise ValueError(\n f\"No intersecting channel between self and other:\\n\"\n f\"{chan_detected}\\n{chan_groundtruth}\"\n )\n\n # The output is a pandas.DataFrame (n_chan, n_metrics).\n scores = pd.DataFrame(\n index=chan_both, columns=[\"precision\", \"recall\", \"f1\", \"n_self\", \"n_other\"], dtype=float\n )\n scores.index.name = \"Channel\"\n\n # Loop on each channel\n for c_index in chan_both:\n idx_detected = detected[detected[\"Channel\"] == c_index][\"Start\"]\n idx_groundtruth = groundtruth[groundtruth[\"Channel\"] == c_index][\"Start\"]\n if other_is_groundtruth:\n res = compare_detection(idx_detected, idx_groundtruth, max_distance)\n else:\n res = compare_detection(idx_groundtruth, idx_detected, max_distance)\n scores.loc[c_index, \"precision\"] = res[\"precision\"]\n scores.loc[c_index, \"recall\"] = res[\"recall\"]\n scores.loc[c_index, \"f1\"] = res[\"f1\"]\n scores.loc[c_index, \"n_self\"] = len(idx_detected)\n scores.loc[c_index, \"n_other\"] = len(idx_groundtruth)\n\n scores[\"n_self\"] = scores[\"n_self\"].astype(int)\n scores[\"n_other\"] = scores[\"n_other\"].astype(int)\n\n return scores",
"def compare_events(obtained, expected):\n\n result = {'true_positive': 0, 'false_positive': 0, 'false_negative': 0}\n # Iterate over a list of keys (frames) to be able to delete them from the dictionary\n for frame in list(expected):\n exp_event = expected[frame]\n if find_match(frame, exp_event, obtained):\n result['true_positive'] += 1\n del expected[frame]\n\n # TODO: If events that have a partial match - in particular the same frame number and/or same player (id) -\n # count as false_positive (incorrect) and remove from 'expected' dictionary\n # Count events that are left in `expected` as not detected\n result['false_negative'] += len(expected)\n # Count events that are left in `obtained` as incorrect\n result['false_positive'] += len(obtained)\n return result",
"def test_detect(self):\n\n for example in examples:\n if not (e_path / example / \"outputs/runs/example_run\").is_dir():\n continue\n\n print(f\"Testing detect output from {example} run.\")\n b_dir = b_path / example\n t_dir = pathlib.Path(f\"{str(t_path).format(example)}\") / \"detect\"\n\n print(\"\\t1: Assert same number of channels in .scanmseed...\")\n b_st = obspy.read(f\"{b_dir / '*.scanmseed'}\")\n t_st = obspy.read(f\"{t_dir / 'scanmseed' / '*.scanmseed'}\")\n self.assertEqual(len(b_st), len(t_st))\n print(\"\\t ...passed!\")\n\n print(\"\\t2: Assert meta data is identical...\")\n self.assertEqual(b_st[0].stats, t_st[0].stats)\n print(\"\\t ...passed!\")\n\n print(\"\\t3: Assert each data channels are identical...\")\n c_st = b_st + t_st\n c_st.merge(method=-1)\n self.assertEqual(len(c_st), len(b_st))\n print(\"\\t ...passed!\")\n\n print(\"\\t4: Assert availability files are identical...\")\n b_av = sorted(b_dir.glob(\"*Availability*\"))[0]\n t_av = sorted((t_dir / \"availability\").glob(\"*Availability*\"))[0]\n self.assertTrue(pd.read_csv(b_av).equals(pd.read_csv(t_av)))\n print(\"\\t ...passed!\")",
"def detect_freqs(self):\r\n channel_avgs = []\r\n differences = []\r\n for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]):\r\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\r\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\r\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\r\n if any(differences[j] >= self.min_percent_diff[i]\\\r\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\r\n for j in range(*self.detection_ranges[i]))\\\r\n and (time.time() - self.prev_freq_detects[i] > 0.1)\\\r\n and len(self.freq_channels[0]) == self.freq_channel_history:\r\n self.prev_freq_detects[i] = time.time()\r\n self.current_freq_detects[i] = True\r\n #print(i)\r\n else:\r\n self.current_freq_detects[i] = False",
"def CompareConv(x, h):\n global time\n start = time.time()\n freqConv = signal.convolve(x, h)\n end = time.time()\n freqTime = end - start\n\n start = time.time()\n timeConv = myTimeConv(x, h)\n end = time.time()\n timeTime = end - start\n\n time = np.array([freqTime, timeTime])\n\n m = np.mean(freqConv - timeConv) #mean difference\n mabs = np.mean(np.abs(freqConv - timeConv)) #mean absolute difference\n stdev = np.std(freqConv - timeConv) #standard deviation of the difference\n\n return m, mabs, stdev, time",
"def event_extraction_and_comparison(sr, sync):\n\n # it took 8 min to run that for 6 min of data, all 300 ish channels\n # silent channels for Guido's set:\n # [36,75,112,151,188,227,264,303,317,340,379,384]\n\n # sr,sync=get_ephys_data(sync_test_folder)\n\n _logger.info('starting event_extraction_and_comparison')\n period_duration = 30000 # in observations, 30 kHz\n BATCH_SIZE_SAMPLES = period_duration # in observations, 30 kHz\n\n # if the data is needed as well, loop over the file\n # raw data contains raw ephys traces, while raw_sync contains the 16 sync\n # traces\n rawdata, _ = sr.read_samples(0, BATCH_SIZE_SAMPLES)\n _, chans = rawdata.shape\n\n chan_fronts = {}\n\n sync_up_fronts = ephys_fpga.get_sync_fronts(sync, 0)['times'][0::2]\n sync_up_fronts = np.array(sync_up_fronts) * sr.fs\n\n assert len(sync_up_fronts) == 500, 'There are not all sync pulses'\n\n for j in range(chans):\n chan_fronts[j] = {}\n chan_fronts[j]['ephys up fronts'] = []\n\n k = 0\n\n # assure there is exactly one pulse per cut segment\n\n for pulse in range(500): # there are 500 square pulses\n\n first = int(sync_up_fronts[pulse] - period_duration / 2)\n last = int(first + period_duration / 2)\n\n if k % 100 == 0:\n print('segment %s of %s' % (k, 500))\n\n k += 1\n\n rawdata, rawsync = sr.read_samples(first, last)\n\n # get fronts for only one valid ephys channel\n obs, chans = rawdata.shape\n\n i = 0 # assume channel 0 is valid (to be generalized maybe)\n\n Mean = np.median(rawdata.T[i])\n Std = np.std(rawdata.T[i])\n\n ups = np.invert(rawdata.T[i] > Mean + 2 * Std)\n up_fronts = []\n\n # Activity front at least 10 samples long (empirical)\n\n up_fronts.append(first_occ_index(ups, 1) + first)\n\n chan_fronts[i]['ephys up fronts'].append(up_fronts)\n\n return chan_fronts, sync_up_fronts",
"def feed_comparison(self, channel):\n comparison_results = []\n retval = []\n # Alert if tower is not in feed DB\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_against_feed(channel))\n # Else, be willing to alert if channel is not in range\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_range(channel))\n # Test for primary BTS change\n if channel[\"cell\"] == '0':\n comparison_results.append(self.process_cell_zero(channel))\n for result in comparison_results:\n if result != ():\n retval.append(result)\n if len(retval) == 0:\n if channel[\"cgi_str\"] not in self.good_cgis:\n self.good_cgis.append(channel[\"cgi_str\"])\n return retval",
"def scan_motion(self):\n if self.initialized is not True:\n print(\"[DVR-Scan] Error: Scan context uninitialized, no analysis performed.\")\n return\n if len(self.live_stream) == 0:\n print(\"[DVR-Scan] Scanning %s for motion events...\" % (\n \"%d input videos\" % len(self.video_paths) if len(self.video_paths) > 1\n else \"input video\"))\n else:\n print(\"[DVR-Scan] Scanning %s for motion events...\" % self.live_stream)\n\n bg_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=False)\n buffered_frames = []\n event_window = []\n self.event_list = []\n num_frames_post_event = 0\n event_start = None\n\n video_writer = None\n output_prefix = ''\n if self.comp_file:\n video_writer = cv2.VideoWriter(self.comp_file, self.fourcc,\n self.video_fps, self.video_resolution)\n elif len(self.live_stream) > 0:\n output_prefix = \"live_stream_\"\n elif len(self.video_paths[0]) > 0:\n output_prefix = os.path.basename(self.video_paths[0])\n dot_index = output_prefix.rfind('.')\n if dot_index > 0:\n output_prefix = output_prefix[:dot_index]\n\n curr_pos = FrameTimecode(self.video_fps, 0)\n #curr_state = 'no_event' # 'no_event', 'in_event', or 'post_even\n in_motion_event = False\n num_frames_read = 0\n num_frames_processed = 0\n processing_start = time.time()\n \n\n tqdm = dvr_scan.platform.get_tqdm()\n progress_bar = None\n self.frames_total = int(self.frames_total)\n if tqdm is not None and self.frames_total > 0 and not self.suppress_output:\n if self.end_time and self.end_time.frame_num < self.frames_total:\n self.frames_total = self.end_time.frame_num\n if self.start_time:\n self.frames_total -= self.start_time.frame_num\n if self.frames_total < 0:\n self.frames_total = 0\n progress_bar = tqdm.tqdm(\n total = self.frames_total, unit = ' frames',\n desc = \"[DVR-Scan] Processed\")\n\n # Seek to starting position if required.\n if self.start_time is not None:\n while curr_pos.frame_num < self.start_time.frame_num:\n if self._get_next_frame(False) is None:\n break\n num_frames_read += 1\n curr_pos.frame_num += 1\n\n # Motion event scanning/detection loop.\n while True:\n if self.end_time is not None and curr_pos.frame_num >= self.end_time.frame_num:\n break\n if self.frame_skip > 0:\n for _ in range(self.frame_skip):\n if self._get_next_frame(False) is None:\n break\n curr_pos.frame_num += 1\n num_frames_read += 1\n if progress_bar:\n progress_bar.update(1)\n frame_rgb = self._get_next_frame()\n if frame_rgb is None:\n break\n\n frame_gray = cv2.cvtColor(frame_rgb, cv2.COLOR_BGR2GRAY)\n frame_mask = bg_subtractor.apply(frame_gray)\n frame_filt = cv2.morphologyEx(frame_mask, cv2.MORPH_OPEN, self.kernel)\n frame_score = np.sum(frame_filt) / float(frame_filt.shape[0] * frame_filt.shape[1])\n event_window.append(frame_score)\n event_window = event_window[-self.min_event_len.frame_num:]\n\n if in_motion_event:\n # in event or post event, write all queued frames to file,\n # and write current frame to file.\n # if the current frame doesn't meet the threshold, increment\n # the current scene's post-event counter.\n if not self.scan_only_mode:\n video_writer.write(frame_rgb)\n if frame_score >= self.threshold:\n num_frames_post_event = 0\n else:\n num_frames_post_event += 1\n if num_frames_post_event >= self.post_event_len.frame_num:\n in_motion_event = False\n event_end = FrameTimecode(\n self.video_fps, curr_pos.frame_num)\n event_duration = FrameTimecode(\n self.video_fps, curr_pos.frame_num - event_start.frame_num)\n self.event_list.append((event_start, event_end, event_duration))\n if not 
self.comp_file and not self.scan_only_mode:\n video_writer.release()\n else:\n if not self.scan_only_mode:\n buffered_frames.append(frame_rgb)\n buffered_frames = buffered_frames[-self.pre_event_len.frame_num:]\n if len(event_window) >= self.min_event_len.frame_num and all(\n score >= self.threshold for score in event_window):\n in_motion_event = True\n if self.live_mode:\n print(\"Motion detected at: \" + datetime.datetime.now().strftime(\"%X\"))\n sys.stdout.flush()\n event_window = []\n num_frames_post_event = 0\n event_start = FrameTimecode(\n self.video_fps, curr_pos.frame_num)\n # Open new VideoWriter if needed, write buffered_frames to file.\n if not self.scan_only_mode:\n if not self.comp_file:\n output_path = '%s.DSME_%04d.avi' % (\n output_prefix, len(self.event_list))\n video_writer = cv2.VideoWriter(\n output_path, self.fourcc, self.video_fps,\n self.video_resolution)\n for frame in buffered_frames:\n video_writer.write(frame)\n buffered_frames = []\n\n curr_pos.frame_num += 1\n num_frames_read += 1\n num_frames_processed += 1\n if progress_bar:\n progress_bar.update(1)\n\n # If we're still in a motion event, we still need to compute the duration\n # and ending timecode and add it to the event list.\n if in_motion_event:\n curr_pos.frame_num -= 1 # Correct for the increment at the end of the loop\n event_end = FrameTimecode(\n self.video_fps, curr_pos.frame_num)\n event_duration = FrameTimecode(\n self.video_fps, curr_pos.frame_num - event_start.frame_num)\n self.event_list.append((event_start, event_end, event_duration))\n\n if video_writer is not None:\n video_writer.release()\n if progress_bar is not None:\n progress_bar.close()\n elif not self.suppress_output:\n processing_time = time.time() - processing_start\n processing_rate = float(num_frames_read) / processing_time\n print(\"[DVR-Scan] Processed %d / %d frames read in %3.1f secs (avg %3.1f FPS).\" % (\n num_frames_processed, num_frames_read, processing_time, processing_rate))\n if not len(self.event_list) > 0:\n print(\"[DVR-Scan] No motion events detected in input.\")\n return\n\n if not self.live_mode:\n print(\"[DVR-Scan] Detected %d motion events in input.\" % len(self.event_list))\n print(\"[DVR-Scan] Scan-only mode specified, list of motion events:\")\n print(\"-------------------------------------------------------------\")\n print(\"| Event # | Start Time | Duration | End Time |\")\n print(\"-------------------------------------------------------------\")\n for event_num, (event_start, event_end, event_duration) in enumerate(self.event_list):\n print(\"| Event %4d | %s | %s | %s |\" % (\n event_num + 1, event_start.get_timecode(precision=1),\n event_duration.get_timecode(precision=1),\n event_end.get_timecode(precision=1)))\n print(\"-------------------------------------------------------------\")\n\n if self.scan_only_mode:\n print(\"[DVR-Scan] Comma-separated timecode values:\")\n timecode_list = []\n for event_start, event_end, event_duration in self.event_list:\n timecode_list.append(event_start.get_timecode())\n timecode_list.append(event_end.get_timecode())\n print(','.join(timecode_list))\n else:\n print(\"[DVR-Scan] Motion events written to disk.\")",
"def compare_4stage(config, testname, outdir, oscfitfile):\n logging.debug('>> Working on baseline comparisons between both fitters.')\n logging.debug('>>> Doing %s test.'%testname)\n baseline_comparisons = from_file(oscfitfile)\n ref_abv='OscFit'\n\n pipeline = Pipeline(config)\n outputs = pipeline.get_outputs()\n\n total_pisa_events = 0.0\n total_oscfit_events = 0.0\n\n for nukey in baseline_comparisons.keys():\n\n baseline_map_to_plot = baseline_comparisons[nukey]\n oscfit_events = np.sum(baseline_map_to_plot['map'])\n\n cake_map = outputs.combine_wildcard('*')\n cake_map_to_plot = {}\n cake_map_to_plot['ebins'] = \\\n cake_map.binning['reco_energy'].bin_edges.magnitude\n cake_map_to_plot['czbins'] = \\\n cake_map.binning['reco_coszen'].bin_edges.magnitude\n if nukey == 'trck':\n texname = r'\\rm{trck}'\n cake_map_to_plot['map'] = \\\n cake_map.split(\n dim='pid',\n bin='trck'\n ).hist\n elif nukey == 'cscd':\n texname = r'\\rm{cscd}'\n cake_map_to_plot['map'] = \\\n cake_map.split(\n dim='pid',\n bin='cscd'\n ).hist\n pisa_events = np.sum(cake_map_to_plot['map'])\n\n max_diff_ratio, max_diff = plot_comparisons(\n ref_map=baseline_map_to_plot,\n new_map=cake_map_to_plot,\n ref_abv=ref_abv,\n new_abv=testname,\n outdir=outdir,\n subdir='recopidcombinedchecks',\n stagename=None,\n servicename='baseline',\n name=nukey,\n texname=texname,\n shorttitles=True,\n ftype=FMT\n )\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind=nukey,\n map1_events=pisa_events,\n map2_events=oscfit_events\n )\n\n total_pisa_events += pisa_events\n total_oscfit_events += oscfit_events\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind='all',\n map1_events=total_pisa_events,\n map2_events=total_oscfit_events\n )\n\n return pipeline",
"def checkChannels(self):\n\t\t\n\t\tb=True\n\t\t\n\t\tfor i,exp in enumerate(self.exps):\n\t\t\tnames=exp.getChannelNames()\n\t\t\tnames.sort()\n\t\t\t\n\t\t\tif i>0:\n\t\t\t\tif names!=lastNames:\n\t\t\t\t\tprintWarning(exp.name+ \" has not the same channels than \"+ self.exps[i-1])\n\t\t\t\t\tb=False\t\n\t\t\t\t\t\n\t\t\tlastNames=list(names)\n\t\t\t\n\t\treturn b",
"def all_fired(self):\n if False in self.channels_fired:\n return False\n else:\n return True",
"def detected_clusters_callback(self, detected_clusters_msg):\n # Waiting for the local map to be published before proceeding. This is ONLY needed so the benchmarks are consistent every iteration\n # Should be removed under regular operation\n if self.use_scan_header_stamp_for_tfs: # Assume <self.use_scan_header_stamp_for_tfs> means we're running the timing benchmark\n wait_iters = 0\n while self.new_local_map_received == False and wait_iters < 10:\n rospy.sleep(0.1)\n wait_iters += 1\n if wait_iters >= 10:\n rospy.loginfo(\"no new local_map received. Continuing anyways\")\n else:\n self.new_local_map_received = False\n\n now = detected_clusters_msg.header.stamp\n \n detected_clusters = []\n detected_clusters_set = set()\n for cluster in detected_clusters_msg.legs:\n new_detected_cluster = DetectedCluster(\n cluster.position.x, \n cluster.position.y, \n cluster.confidence, \n in_free_space=self.how_much_in_free_space(cluster.position.x, cluster.position.y)\n ) \n if new_detected_cluster.in_free_space < self.in_free_space_threshold:\n new_detected_cluster.in_free_space_bool = True\n else:\n new_detected_cluster.in_free_space_bool = False\n detected_clusters.append(new_detected_cluster)\n detected_clusters_set.add(new_detected_cluster) \n \n\t\t# Propogate existing tracks\n to_duplicate = set()\n propogated = copy.deepcopy(self.objects_tracked)\n for propogated_track in propogated:\n propogated_track.update(np.ma.masked_array(np.array([0, 0]), mask=[1,1])) \n if propogated_track.is_person:\n to_duplicate.add(propogated_track)\n \n # Duplicate tracks of people so they can be matched twice in the matching\n duplicates = {}\n for propogated_track in to_duplicate:\n propogated.append(copy.deepcopy(propogated_track))\n duplicates[propogated_track] = propogated[-1]\n\n # Match detected objects to existing tracks\n matched_tracks = self.match_detections_to_tracks_GNN(propogated, detected_clusters) \n \n # Publish non-human clusters so the local grid occupancy map knows which scan clusters correspond to people\n non_legs_msg = LegArray()\n non_legs_msg.header = detected_clusters_msg.header\n leg_clusters = set()\n for track, detect in matched_tracks.items(): \n if track.is_person:\n leg_clusters.add(detect)\n non_leg_clusters = detected_clusters_set.difference(leg_clusters)\n for detect in non_leg_clusters:\n non_leg = Leg(Point(detect.pos_x, detect.pos_y, 0), 1)\n non_legs_msg.legs.append(non_leg) \n self.non_leg_clusters_pub.publish(non_legs_msg) \n\n # Update all tracks with new oberservations \n tracks_to_delete = set() \n for idx, track in enumerate(self.objects_tracked):\n propogated_track = propogated[idx] # Get the corresponding propogated track\n if propogated_track.is_person:\n if propogated_track in matched_tracks and duplicates[propogated_track] in matched_tracks:\n # Two matched legs for this person. 
Create a new detected cluster which is the average of the two\n md_1 = matched_tracks[propogated_track]\n md_2 = matched_tracks[duplicates[propogated_track]]\n matched_detection = DetectedCluster((md_1.pos_x+md_2.pos_x)/2., (md_1.pos_y+md_2.pos_y)/2., (md_1.confidence+md_2.confidence)/2., (md_1.in_free_space+md_2.in_free_space)/2.)\n elif propogated_track in matched_tracks:\n # Only one matched leg for this person\n md_1 = matched_tracks[propogated_track]\n md_2 = duplicates[propogated_track]\n matched_detection = DetectedCluster((md_1.pos_x+md_2.pos_x)/2., (md_1.pos_y+md_2.pos_y)/2., md_1.confidence, md_1.in_free_space) \n elif duplicates[propogated_track] in matched_tracks:\n # Only one matched leg for this person \n md_1 = matched_tracks[duplicates[propogated_track]]\n md_2 = propogated_track\n matched_detection = DetectedCluster((md_1.pos_x+md_2.pos_x)/2., (md_1.pos_y+md_2.pos_y)/2., md_1.confidence, md_1.in_free_space) \n else: \n # No legs matched for this person \n matched_detection = None \n else:\n if propogated_track in matched_tracks:\n # Found a match for this non-person track\n matched_detection = matched_tracks[propogated_track]\n else:\n matched_detection = None \n\n if matched_detection:\n observations = np.array([matched_detection.pos_x, \n matched_detection.pos_y])\n track.in_free_space = 0.8*track.in_free_space + 0.2*matched_detection.in_free_space \n track.confidence = 0.95*track.confidence + 0.05*matched_detection.confidence \n track.times_seen += 1\n track.last_seen = now\n track.seen_in_current_scan = True\n else: # propogated_track not matched to a detection\n # don't provide a measurement update for Kalman filter \n # so send it a masked_array for its observations\n observations = np.ma.masked_array(np.array([0, 0]), mask=[1,1]) \n track.seen_in_current_scan = False\n \n # Input observations to Kalman filter\n track.update(observations)\n\n # Check track for deletion \n if track.is_person and track.confidence < self.confidence_threshold_to_maintain_track:\n tracks_to_delete.add(track)\n # rospy.loginfo(\"deleting due to low confidence\")\n else:\n # Check track for deletion because covariance is too large\n cov = track.filtered_state_covariances[0][0] + track.var_obs # cov_xx == cov_yy == cov\n if cov > self.max_cov:\n tracks_to_delete.add(track)\n # rospy.loginfo(\"deleting because unseen for %.2f\", (now - track.last_seen).to_sec())\n\n # Delete tracks that have been set for deletion\n for track in tracks_to_delete: \n track.deleted = True # Because the tracks are also pointed to in self.potential_leg_pairs, we have to mark them deleted so they can deleted from that set too\n self.objects_tracked.remove(track)\n \n # If detections were not matched, create a new track \n for detect in detected_clusters: \n if not detect in matched_tracks.values():\n self.objects_tracked.append(ObjectTracked(detect.pos_x, detect.pos_y, now, detect.confidence, is_person=False, in_free_space=detect.in_free_space))\n\n # Do some leg pairing to create potential people tracks/leg pairs\n for track_1 in self.objects_tracked:\n for track_2 in self.objects_tracked:\n if (track_1 != track_2 \n and track_1.id_num > track_2.id_num \n and (not track_1.is_person or not track_2.is_person) \n and (track_1, track_2) not in self.potential_leg_pairs\n ):\n self.potential_leg_pairs.add((track_1, track_2))\n self.potential_leg_pair_initial_dist_travelled[(track_1, track_2)] = (track_1.dist_travelled, track_2.dist_travelled)\n \n # We want to iterate over the potential leg pairs but iterating over the 
set <self.potential_leg_pairs> will produce arbitrary iteration orders.\n # This is bad if we want repeatable tests (but otherwise, it shouldn't affect performance).\n # So we'll create a sorted list and iterate over that.\n potential_leg_pairs_list = list(self.potential_leg_pairs)\n potential_leg_pairs_list.sort(key=lambda tup: (tup[0].id_num, tup[1].id_num))\n\n # Check if current leg pairs are still valid and if they should spawn a person\n leg_pairs_to_delete = set() \n for track_1, track_2 in potential_leg_pairs_list:\n # Check if we should delete this pair because \n # - the legs are too far apart \n # - or one of the legs has already been paired \n # - or a leg has been deleted because it hasn't been seen for a while\n dist = ((track_1.pos_x - track_2.pos_x)**2 + (track_1.pos_y - track_2.pos_y)**2)**(1./2.)\n if (dist > self.max_leg_pairing_dist \n or track_1.deleted or track_2.deleted\n or (track_1.is_person and track_2.is_person) \n or track_1.confidence < self.confidence_threshold_to_maintain_track \n or track_2.confidence < self.confidence_threshold_to_maintain_track\n ):\n leg_pairs_to_delete.add((track_1, track_2))\n continue\n\n # Check if we should create a tracked person from this pair\n # Three conditions must be met:\n # - both tracks have been matched to a cluster in the current scan\n # - both tracks have travelled at least a distance of <self.dist_travelled_together_to_initiate_leg_pair> since they were paired\n # - both tracks are in free-space\n if track_1.seen_in_current_scan and track_2.seen_in_current_scan:\n track_1_initial_dist, track_2_initial_dist = self.potential_leg_pair_initial_dist_travelled[(track_1, track_2)]\n dist_travelled = min(track_1.dist_travelled - track_1_initial_dist, track_2.dist_travelled - track_2_initial_dist)\n if (dist_travelled > self.dist_travelled_together_to_initiate_leg_pair \n and (track_1.in_free_space < self.in_free_space_threshold or track_2.in_free_space < self.in_free_space_threshold)\n ):\n if not track_1.is_person and not track_2.is_person:\n # Create a new person from this leg pair\n self.objects_tracked.append(\n ObjectTracked(\n (track_1.pos_x+track_2.pos_x)/2., \n (track_1.pos_y+track_2.pos_y)/2., now, \n (track_1.confidence+track_2.confidence)/2., \n is_person=True, \n in_free_space=0.)\n ) \n track_1.deleted = True\n track_2.deleted = True\n self.objects_tracked.remove(track_1)\n self.objects_tracked.remove(track_2)\n elif track_1.is_person:\n # Matched a tracked person to a tracked leg. Just delete the leg and the person will hopefully be matched next iteration\n track_2.deleted = True\n self.objects_tracked.remove(track_2)\n else: # track_2.is_person:\n # Matched a tracked person to a tracked leg. Just delete the leg and the person will hopefully be matched next iteration\n track_1.deleted = True\n self.objects_tracked.remove(track_1)\n leg_pairs_to_delete.add((track_1, track_2))\n\n # Delete leg pairs set for deletion\n for leg_pair in leg_pairs_to_delete:\n self.potential_leg_pairs.remove(leg_pair)\n\n # Publish to rviz and /people_tracked topic.\n self.publish_tracked_objects(now)\n self.publish_tracked_people(now)",
"def EventAnalysis(self):\n\n # Require a good data quality flag\n if self.DAQ.ND280OffFlag > 0:\n return\n\n if self.trigger and self.trigger == \"FGD\":\n if (not self.BasicHeader.FGDCosmicEvent) or self.BasicHeader.TripTCosmicEvent:\n return\n\n if self.ReconPerfEval.NGlobalReconObject > 25:\n return\n\n self.numEvents[\"All\"] += 1\n\n for obj in self.ReconPerfEval.GlobalReconObject:\n if obj.SetOK and obj.StatusString.find(\"success\") != -1:\n # FV and timing cuts, if requested.\n if self.cuttype:\n isMC = (self.BasicHeader.RunID > 100000)\n if not timing_cuts.PassesCut(isMC, self.BasicHeader.RunID, obj.Position.T()):\n continue\n if not volume_cuts.IsInVolume(obj.Position.Vect(), self.cuttype):\n continue\n\n path = obj.SubdetectorString\n new = False\n prev = False\n preprev = False\n summary = grtf_tools.ConvertPathToSummary(path)\n\n if path not in self.percentages[\"ByPos\"]:\n self.percentages[\"ByPos\"][path] = {\"Total\": 0, \"Fail\": 0}\n\n self.percentages[\"ByPos\"][path][\"Total\"] += 1\n failedByPos = False\n\n # Loop over the nodes and check for any that show bad kinks.\n for node in obj.GlobalNodes:\n new = node.NodeState\n\n if not new.SetOK:\n break\n\n if preprev:\n ok = self.FillByPosPlot(summary, preprev, prev, new, obj)\n if not ok:\n failedByPos = True\n\n preprev = prev\n prev = new\n\n # Print the details of this track if it contains a bad track.\n if failedByPos:\n self.percentages[\"ByPos\"][path][\"Fail\"] += 1\n\n if grtf_tools.ContainsTracker(obj.SubdetectorString):\n grtf_tools.PrintEvent(self, \"TRACKER-Failures.txt\", path)\n\n return",
"def identify_events_by_src(device_by_src, pkt_src, pkt_dst, device_categorization, event_identification):\n if device_categorization[pkt_src] == 'CAMERA':\n for k, v in sorted(device_by_src.iteritems()):\n if v > 100000:\n if check_motion_event(k, pkt_src, event_identification):\n # event_identification.append([k, DEVICE_NAME[pkt_src], DEVICE_NAME[pkt_dst], '1'])\n event_identification.append([k, DEVICE_NAME[pkt_src], '1'])\n elif device_categorization[pkt_src] == 'SENSOR':\n for k, v in sorted(device_by_src.iteritems()):\n if v > 10000:\n if check_motion_event(k, pkt_src, event_identification):\n # event_identification.append([k, DEVICE_NAME[pkt_src], DEVICE_NAME[pkt_dst], '1'])\n event_identification.append([k, DEVICE_NAME[pkt_src], '1'])\n\n return event_identification",
"def compare_5stage(config, testname, outdir, oscfitfile):\n logging.debug('>> Working on baseline comparisons between both fitters.')\n logging.debug('>>> Doing %s test.'%testname)\n baseline_comparisons = from_file(oscfitfile)\n ref_abv='OscFit'\n\n pipeline = Pipeline(config)\n outputs = pipeline.get_outputs()\n\n total_pisa_events = 0.0\n total_oscfit_events = 0.0\n\n for nukey in baseline_comparisons.keys():\n\n baseline_map_to_plot = baseline_comparisons[nukey]\n oscfit_events = np.sum(baseline_map_to_plot['map'])\n\n cake_map = outputs.combine_wildcard('*_%s'%nukey)\n if nukey == 'trck':\n texname = r'\\rm{trck}'\n elif nukey == 'cscd':\n texname = r'\\rm{cscd}'\n cake_map_to_plot = {}\n cake_map_to_plot['ebins'] = \\\n cake_map.binning['reco_energy'].bin_edges.magnitude\n cake_map_to_plot['czbins'] = \\\n cake_map.binning['reco_coszen'].bin_edges.magnitude\n cake_map_to_plot['map'] = cake_map.hist\n pisa_events = np.sum(cake_map_to_plot['map'])\n\n max_diff_ratio, max_diff = plot_comparisons(\n ref_map=baseline_map_to_plot,\n new_map=cake_map_to_plot,\n ref_abv=ref_abv,\n new_abv=testname,\n outdir=outdir,\n subdir='recopidcombinedchecks',\n stagename=None,\n servicename='baseline',\n name=nukey,\n texname=texname,\n shorttitles=True,\n ftype=FMT\n )\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind=nukey,\n map1_events=pisa_events,\n map2_events=oscfit_events\n )\n\n total_pisa_events += pisa_events\n total_oscfit_events += oscfit_events\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind='all',\n map1_events=total_pisa_events,\n map2_events=total_oscfit_events\n )\n\n return pipeline",
"def _verify_scan_channels(self):\n if 'scan' not in self.properties:\n self.logger.error(\"'scan' not found in properties\")\n return\n if 'ao_channel' not in self.properties['scan'] or self.properties['scan']['ao_channel'] not in [1,2]:\n self.logger.error(\"'ao_channel' not found in properties or invalid value (should be 1 or 2)\")\n return\n if 'ai_channel' not in self.properties['scan'] or self.properties['scan']['ai_channel'] not in [1,2]:\n self.logger.error(\"'ai_channel' not found in properties or invalid value (should be 1 or 2)\")\n return\n return self.properties['scan']['ao_channel'], self.properties['scan']['ai_channel']",
"def scatter_energies(self, events, prng=None):\n prng = parse_prng(prng)\n eidxs = np.argsort(events[\"energy\"])\n sorted_e = events[\"energy\"][eidxs]\n\n detectedChannels = []\n\n # run through all photon energies and find which bin they go in\n fcurr = 0\n last = sorted_e.shape[0]\n\n emin = sorted_e[0]\n emax = sorted_e[-1]\n\n pbar = tqdm(leave=True, total=last, desc=\"Scattering energies \")\n for (k, low), high in zip(enumerate(self.elo), self.ehi):\n if high < emin or low > emax:\n continue\n e = sorted_e[fcurr:last]\n nn = np.logical_and(low <= e, e < high).sum()\n if nn == 0:\n continue\n # weight function for probabilities from RMF\n weights = np.nan_to_num(np.float64(self.data[\"MATRIX\"][k]))\n weights /= weights.sum()\n trueChannel = self._make_channels(k)\n if len(trueChannel) > 0:\n channelInd = prng.choice(len(weights), size=nn, p=weights)\n detectedChannels.append(trueChannel[channelInd])\n fcurr += nn\n pbar.update(nn)\n\n pbar.close()\n\n for key in events:\n events[key] = events[key][eidxs]\n events[self.header[\"CHANTYPE\"]] = np.concatenate(detectedChannels)\n\n return events",
"def get_channels():",
"def pipeline(self, img):\n\n self.frame_count += 1\n\n unit_detections = self.detector.get_detections(img) # measurement\n\n unit_trackers = []\n\n for trk in self.tracker_list:\n unit_trackers.append(trk.unit_object)\n\n matched, unmatched_dets, unmatched_trks = self.assign_detections_to_trackers(unit_trackers, unit_detections,\n iou_thrd=0.3)\n\n LOGGER.debug('Detection: ' + str(unit_detections))\n LOGGER.debug('x_box: ' + str(unit_trackers))\n LOGGER.debug('matched:' + str(matched))\n LOGGER.debug('unmatched_det:' + str(unmatched_dets))\n LOGGER.debug('unmatched_trks:' + str(unmatched_trks))\n\n # Matched Detections\n for trk_idx, det_idx in matched:\n z = unit_detections[det_idx].box\n z = np.expand_dims(z, axis=0).T\n tmp_trk = self.tracker_list[trk_idx]\n tmp_trk.predict_and_update(z)\n xx = tmp_trk.x_state.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n unit_trackers[trk_idx].box = xx\n unit_trackers[trk_idx].class_id = unit_detections[det_idx].class_id\n tmp_trk.unit_object = unit_trackers[trk_idx]\n tmp_trk.hits += 1\n tmp_trk.no_losses = 0\n\n # Unmatched Detections\n for idx in unmatched_dets:\n z = unit_detections[idx].box\n z = np.expand_dims(z, axis=0).T\n tmp_trk = self.tracker() # Create a new tracker\n x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T\n tmp_trk.x_state = x\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.unit_object.box = xx\n tmp_trk.unit_object.class_id = unit_detections[idx].class_id\n tmp_trk.tracking_id = self.track_id_list.popleft() # assign an ID for the tracker\n self.tracker_list.append(tmp_trk)\n unit_trackers.append(tmp_trk.unit_object)\n\n # Unmatched trackers\n for trk_idx in unmatched_trks:\n tmp_trk = self.tracker_list[trk_idx]\n tmp_trk.no_losses += 1\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.unit_object.box = xx\n unit_trackers[trk_idx] = tmp_trk.unit_object\n\n # The list of tracks to be annotated\n good_tracker_list = []\n for trk in self.tracker_list:\n if (trk.hits >= self.min_hits) and (trk.no_losses <= self.max_age):\n good_tracker_list.append(trk)\n img = utils.drawing.draw_box_label(img, trk, self.detector.class_names)\n\n # Manage Tracks to be deleted\n deleted_tracks = filter(lambda x: x.no_losses > self.max_age, self.tracker_list)\n\n for trk in deleted_tracks:\n self.track_id_list.append(trk.tracking_id)\n\n self.tracker_list = [x for x in self.tracker_list if x.no_losses <= self.max_age]\n\n return img"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compare detected events between two detection methods, or against a groundtruth scoring. See full documentation in the methods of SpindlesResults and SWResults.
|
def compare_detection(self, other, max_distance_sec=0, other_is_groundtruth=True):
detected = self.summary()
if isinstance(other, (SpindlesResults, SWResults, REMResults)):
groundtruth = other.summary()
elif isinstance(other, pd.DataFrame):
assert "Start" in other.columns
assert "Channel" in other.columns
groundtruth = other[["Start", "Channel"]].copy()
else:
raise ValueError(
f"Invalid argument other: {other}. It must be a YASA detection output or a Pandas "
f"DataFrame with the columns Start and Channels"
)
    # Get indices of event starts in deciseconds, rounded to the nearest decisecond (100 ms).
# This is needed for three reasons:
# 1. Speed up the for loop
# 2. Avoid memory error in yasa.compare_detection
# 3. Make sure that max_distance works even when self and other have different sf.
detected["Start"] = (detected["Start"] * 10).round().astype(int)
groundtruth["Start"] = (groundtruth["Start"] * 10).round().astype(int)
max_distance = int(10 * max_distance_sec)
# Find channels that are present in both self and other
chan_detected = detected["Channel"].unique()
chan_groundtruth = groundtruth["Channel"].unique()
    chan_both = np.intersect1d(chan_detected, chan_groundtruth)  # sorted array of common channels
if not len(chan_both):
raise ValueError(
f"No intersecting channel between self and other:\n"
f"{chan_detected}\n{chan_groundtruth}"
)
# The output is a pandas.DataFrame (n_chan, n_metrics).
scores = pd.DataFrame(
index=chan_both, columns=["precision", "recall", "f1", "n_self", "n_other"], dtype=float
)
scores.index.name = "Channel"
# Loop on each channel
for c_index in chan_both:
idx_detected = detected[detected["Channel"] == c_index]["Start"]
idx_groundtruth = groundtruth[groundtruth["Channel"] == c_index]["Start"]
if other_is_groundtruth:
res = compare_detection(idx_detected, idx_groundtruth, max_distance)
else:
res = compare_detection(idx_groundtruth, idx_detected, max_distance)
scores.loc[c_index, "precision"] = res["precision"]
scores.loc[c_index, "recall"] = res["recall"]
scores.loc[c_index, "f1"] = res["f1"]
scores.loc[c_index, "n_self"] = len(idx_detected)
scores.loc[c_index, "n_other"] = len(idx_groundtruth)
scores["n_self"] = scores["n_self"].astype(int)
scores["n_other"] = scores["n_other"].astype(int)
return scores
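
A minimal usage sketch of compare_detection, under stated assumptions: the yasa.spindles_detect entry point, the simulated data and the channel names are illustrative and not taken from the code above; only the Start/Channel column requirements and the score columns come from the method itself.

import numpy as np
import pandas as pd
import yasa  # assumed dependency; spindles_detect is YASA's public spindle detector

sf = 100.0
rng = np.random.default_rng(42)
data = rng.normal(size=(2, int(10 * 60 * sf)))  # 2 channels x 10 minutes of toy data

# On pure noise the detector may find nothing and return None, so guard the call.
sp = yasa.spindles_detect(data, sf, ch_names=["C3", "C4"])

# The ground truth only needs "Start" (in seconds) and "Channel" columns,
# matching the asserts in compare_detection above.
scoring = pd.DataFrame({
    "Start": [12.3, 45.8, 120.0],
    "Channel": ["C3", "C3", "C4"],
})

if sp is not None:
    # Detections within 1 s of a ground-truth onset count as true positives.
    scores = sp.compare_detection(scoring, max_distance_sec=1)
    print(scores[["precision", "recall", "f1", "n_self", "n_other"]])

Passing other_is_groundtruth=False simply swaps which set of events is treated as the reference when precision and recall are computed, as the if/else in the loop above shows.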
|
[
"def compare_events(obtained, expected):\n\n result = {'true_positive': 0, 'false_positive': 0, 'false_negative': 0}\n # Iterate over a list of keys (frames) to be able to delete them from the dictionary\n for frame in list(expected):\n exp_event = expected[frame]\n if find_match(frame, exp_event, obtained):\n result['true_positive'] += 1\n del expected[frame]\n\n # TODO: If events that have a partial match - in particular the same frame number and/or same player (id) -\n # count as false_positive (incorrect) and remove from 'expected' dictionary\n # Count events that are left in `expected` as not detected\n result['false_negative'] += len(expected)\n # Count events that are left in `obtained` as incorrect\n result['false_positive'] += len(obtained)\n return result",
"def eval_detection_at(self, method=None, iou_threshold=0.1):\n self.print(f\"Evaluating detections @{iou_threshold}\")\n with self.gt as gt:\n # TODO: check if self.total_frames is working\n # gt = chain(gt, repeat(iter(())))\n gt = self.tqdm(gt, total=len(self))\n matches = (\n match_detections(detections, gt_boxes, iou_threshold)\n for detections, gt_boxes in zip(self.detect(method), gt)\n )\n matches = chain.from_iterable(matches)\n matches = sorted(matches, key=lambda m: m[0].confidence)\n TP = np.fromiter(map(lambda x: x[1] is not None, matches), bool)\n precision = TP.cumsum() / (np.arange(len(TP)) + 1)\n precision = np.flip(np.maximum.accumulate(precision[::-1]))\n\n recall = TP.cumsum() / len(self.gt)\n recall_diff = np.diff(np.insert(recall, 0, 0))\n score = (precision * recall_diff).sum()\n self.print(f\"AP@{iou_threshold}: {score}\")\n return score",
"def compare_channels(self, score=\"f1\", max_distance_sec=0):\n from itertools import product\n\n assert score in [\"f1\", \"precision\", \"recall\"], f\"Invalid scoring metric: {score}\"\n\n # Extract events and channel\n detected = self.summary()\n chan = detected[\"Channel\"].unique()\n\n # Get indices of start in deciseconds, rounding to nearest deciseconds (100 ms).\n # This is needed for three reasons:\n # 1. Speed up the for loop\n # 2. Avoid memory error in yasa.compare_detection\n # 3. Make sure that max_distance works even when self and other have different sf.\n # TODO: Only the Start of the event is currently supported. Add more flexibility?\n detected[\"Start\"] = (detected[\"Start\"] * 10).round().astype(int)\n max_distance = int(10 * max_distance_sec)\n\n # Initialize output dataframe / dict\n scores = pd.DataFrame(index=chan, columns=chan, dtype=float)\n scores.index.name = \"Channel\"\n scores.columns.name = \"Channel\"\n pairs = list(product(chan, repeat=2))\n\n # Loop across pair of channels\n for c_index, c_col in pairs:\n idx_chan1 = detected[detected[\"Channel\"] == c_index][\"Start\"]\n idx_chan2 = detected[detected[\"Channel\"] == c_col][\"Start\"]\n # DANGER: Note how we invert idx_chan2 and idx_chan1 here. This is because\n # idx_chan1 (the index of the dataframe) should be the ground-truth.\n res = compare_detection(idx_chan2, idx_chan1, max_distance)\n scores.loc[c_index, c_col] = res[score]\n\n return scores",
"def voc_eval(detections, annotations, ovthresh=0.5, use_confidence=False):\n # read annotations\n class_recs = {}\n npos = 0\n\n for frame_id, boxes in annotations.items():\n bbox = np.array([det.box for det in boxes])\n det = [False] * len(boxes)\n npos += len(boxes)\n class_recs[frame_id] = {\"bbox\": bbox, \"det\": det}\n \n # read detections\n\n image_ids = [x.frame for x in detections]\n BB = np.array([x.box for x in detections]).reshape(-1, 4)\n\n if use_confidence:\n confidence = np.array([float(x.confidence) for x in detections])\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down detections (dets) and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R[\"bbox\"].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n overlaps = voc_iou(BBGT,bb)\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R[\"det\"][jmax]:\n tp[d] = 1.0\n R[\"det\"][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec)\n\n return rec, prec, ap",
"def do_comparison(obtained_output, expected_output, include_counters=False):\n # Rounding for precision, recall and f-score\n rounding = 4\n\n def get_precision(true_pos, false_pos):\n return round(true_pos / (true_pos + false_pos), rounding) if true_pos + false_pos > 0 else 0\n\n def get_recall(true_pos, false_neg):\n return round(true_pos / (true_pos + false_neg), rounding) if true_pos + false_neg > 0 else 0\n\n def get_f_score(true_pos, false_pos, false_neg):\n if true_pos + false_pos + false_neg > 0:\n return round(true_pos / (true_pos + 0.5 * (false_pos + false_neg)), rounding)\n return 0\n\n def get_stat_dict(true_pos, false_pos, false_neg):\n return {'precision': get_precision(true_pos, false_pos), 'recall': get_recall(true_pos, false_neg),\n 'f_score': get_f_score(true_pos, false_pos, false_neg),\n 'true_positive': true_pos, 'false_positive': false_pos, 'false_negative': false_neg}\n\n assert len(obtained_output) == len(expected_output)\n result = {'overall': {}, 'passes': {}, 'failed_passes': {}, 'shots': {}}\n\n expected = count_events(expected_output)\n if include_counters:\n obtained = count_events(obtained_output)\n result['counters'] = {'expected': expected.to_dict(), 'obtained': obtained.to_dict()}\n\n # correct, incorrect, not detected\n tp, fp, fn = 0, 0, 0\n for seq in expected_output:\n comparison = compare_events(obtained_output[seq], expected_output[seq])\n tp += comparison['true_positive']\n fp += comparison['false_positive']\n fn += comparison['false_negative']\n\n result['overall'] = get_stat_dict(tp, fp, fn)\n\n # Events that are left in obtained_output dictionary are false positive (incorrectly recognized) events\n fp_events = count_events(obtained_output)\n # Events that are left in expected_output dictionary are false negative (not detected) events\n fn_events = count_events(expected_output)\n if include_counters:\n result['counters'].update({'false_positive': fp_events.to_dict(),\n 'false_negative': fn_events.to_dict()})\n\n tp_passes = expected.passes - fn_events.passes\n result['passes'] = get_stat_dict(tp_passes, fp_events.passes, fn_events.passes)\n\n tp_failed_passes = expected.failed_passes - fn_events.failed_passes\n result['failed_passes'] = get_stat_dict(tp_failed_passes, fp_events.failed_passes, fn_events.failed_passes)\n\n tp_shots = expected.shots - fn_events.shots\n result['shots'] = get_stat_dict(tp_shots, fp_events.shots, fn_events.shots)\n return result",
"def compare_5stage(config, testname, outdir, oscfitfile):\n logging.debug('>> Working on baseline comparisons between both fitters.')\n logging.debug('>>> Doing %s test.'%testname)\n baseline_comparisons = from_file(oscfitfile)\n ref_abv='OscFit'\n\n pipeline = Pipeline(config)\n outputs = pipeline.get_outputs()\n\n total_pisa_events = 0.0\n total_oscfit_events = 0.0\n\n for nukey in baseline_comparisons.keys():\n\n baseline_map_to_plot = baseline_comparisons[nukey]\n oscfit_events = np.sum(baseline_map_to_plot['map'])\n\n cake_map = outputs.combine_wildcard('*_%s'%nukey)\n if nukey == 'trck':\n texname = r'\\rm{trck}'\n elif nukey == 'cscd':\n texname = r'\\rm{cscd}'\n cake_map_to_plot = {}\n cake_map_to_plot['ebins'] = \\\n cake_map.binning['reco_energy'].bin_edges.magnitude\n cake_map_to_plot['czbins'] = \\\n cake_map.binning['reco_coszen'].bin_edges.magnitude\n cake_map_to_plot['map'] = cake_map.hist\n pisa_events = np.sum(cake_map_to_plot['map'])\n\n max_diff_ratio, max_diff = plot_comparisons(\n ref_map=baseline_map_to_plot,\n new_map=cake_map_to_plot,\n ref_abv=ref_abv,\n new_abv=testname,\n outdir=outdir,\n subdir='recopidcombinedchecks',\n stagename=None,\n servicename='baseline',\n name=nukey,\n texname=texname,\n shorttitles=True,\n ftype=FMT\n )\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind=nukey,\n map1_events=pisa_events,\n map2_events=oscfit_events\n )\n\n total_pisa_events += pisa_events\n total_oscfit_events += oscfit_events\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind='all',\n map1_events=total_pisa_events,\n map2_events=total_oscfit_events\n )\n\n return pipeline",
"def compare_4stage(config, testname, outdir, oscfitfile):\n logging.debug('>> Working on baseline comparisons between both fitters.')\n logging.debug('>>> Doing %s test.'%testname)\n baseline_comparisons = from_file(oscfitfile)\n ref_abv='OscFit'\n\n pipeline = Pipeline(config)\n outputs = pipeline.get_outputs()\n\n total_pisa_events = 0.0\n total_oscfit_events = 0.0\n\n for nukey in baseline_comparisons.keys():\n\n baseline_map_to_plot = baseline_comparisons[nukey]\n oscfit_events = np.sum(baseline_map_to_plot['map'])\n\n cake_map = outputs.combine_wildcard('*')\n cake_map_to_plot = {}\n cake_map_to_plot['ebins'] = \\\n cake_map.binning['reco_energy'].bin_edges.magnitude\n cake_map_to_plot['czbins'] = \\\n cake_map.binning['reco_coszen'].bin_edges.magnitude\n if nukey == 'trck':\n texname = r'\\rm{trck}'\n cake_map_to_plot['map'] = \\\n cake_map.split(\n dim='pid',\n bin='trck'\n ).hist\n elif nukey == 'cscd':\n texname = r'\\rm{cscd}'\n cake_map_to_plot['map'] = \\\n cake_map.split(\n dim='pid',\n bin='cscd'\n ).hist\n pisa_events = np.sum(cake_map_to_plot['map'])\n\n max_diff_ratio, max_diff = plot_comparisons(\n ref_map=baseline_map_to_plot,\n new_map=cake_map_to_plot,\n ref_abv=ref_abv,\n new_abv=testname,\n outdir=outdir,\n subdir='recopidcombinedchecks',\n stagename=None,\n servicename='baseline',\n name=nukey,\n texname=texname,\n shorttitles=True,\n ftype=FMT\n )\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind=nukey,\n map1_events=pisa_events,\n map2_events=oscfit_events\n )\n\n total_pisa_events += pisa_events\n total_oscfit_events += oscfit_events\n\n print_event_rates(\n testname1=testname,\n testname2='OscFit',\n kind='all',\n map1_events=total_pisa_events,\n map2_events=total_oscfit_events\n )\n\n return pipeline",
"def test_detect(self):\n\n for example in examples:\n if not (e_path / example / \"outputs/runs/example_run\").is_dir():\n continue\n\n print(f\"Testing detect output from {example} run.\")\n b_dir = b_path / example\n t_dir = pathlib.Path(f\"{str(t_path).format(example)}\") / \"detect\"\n\n print(\"\\t1: Assert same number of channels in .scanmseed...\")\n b_st = obspy.read(f\"{b_dir / '*.scanmseed'}\")\n t_st = obspy.read(f\"{t_dir / 'scanmseed' / '*.scanmseed'}\")\n self.assertEqual(len(b_st), len(t_st))\n print(\"\\t ...passed!\")\n\n print(\"\\t2: Assert meta data is identical...\")\n self.assertEqual(b_st[0].stats, t_st[0].stats)\n print(\"\\t ...passed!\")\n\n print(\"\\t3: Assert each data channels are identical...\")\n c_st = b_st + t_st\n c_st.merge(method=-1)\n self.assertEqual(len(c_st), len(b_st))\n print(\"\\t ...passed!\")\n\n print(\"\\t4: Assert availability files are identical...\")\n b_av = sorted(b_dir.glob(\"*Availability*\"))[0]\n t_av = sorted((t_dir / \"availability\").glob(\"*Availability*\"))[0]\n self.assertTrue(pd.read_csv(b_av).equals(pd.read_csv(t_av)))\n print(\"\\t ...passed!\")",
"def get_anomaly_detection():",
"def eval_tracking(self, method, detection_method=None, iou_threshold=0.5):\n\n def extract(boxes):\n boxes = list(boxes)\n objs = list(map(lambda box: box.obj_id, boxes))\n box_arr = np.stack([box.ltwh for box in boxes]) if boxes else np.array([])\n return objs, box_arr\n\n self.print(f\"Evaluating tracking...\")\n accumulator = mm.MOTAccumulator(auto_id=True)\n\n with self.gt as gt:\n gt = chain(gt, repeat(iter(())))\n gt = self.tqdm(gt, total=len(self))\n for tracks, gt_boxes in zip(\n self.tracking(method, detection_method, False), gt\n ):\n gt_objs, gt_box_arr = extract(gt_boxes)\n track_objs, track_box_arr = extract(tracks)\n dists = mm.distances.iou_matrix(\n gt_box_arr, track_box_arr, max_iou=iou_threshold\n )\n\n accumulator.update(\n gt_objs, track_objs, dists,\n )\n\n mh = mm.metrics.create()\n summary = mh.compute(\n accumulator, metrics=[\"num_frames\", \"idf1\", \"mota\"], name=\"Full\"\n )\n\n self.print(summary)\n return summary[\"idf1\"][0]",
"def computeDetectedStats(self, abstract):\n self.groupSizes = []\n self.outcomeNumbers = []\n self.eventRates = []\n self.stats = []\n self.trueStats = False\n self.abstract = abstract\n\n # pair outcome measurements for same outcome in same sentence, for different groups\n omHash = {}\n for oTemplate in abstract.entities.getList('outcome'):\n oTemplate.unusedNumbers = []\n omList = oTemplate.getOutcomeMeasurements()\n if len(omList) > 1:\n # there are at least *two* measurements for this outcome\n omHash[oTemplate] = {}\n for om in omList:\n if om.eventRate() != None:\n sentence = om.getSentence()\n if sentence not in omHash[oTemplate]:\n omHash[oTemplate][sentence] = {}\n \n group = om.getGroup()\n if group not in omHash[oTemplate][sentence]:\n omHash[oTemplate][sentence][group] = []\n \n omHash[oTemplate][sentence][group].append(om) \n \n # now try to pair up outcome measurements for the same outcome in same sentence\n for oTemplate in omHash.keys():\n for s in omHash[oTemplate].keys():\n groupList = omHash[oTemplate][s].keys()\n # check for multiple measurements for same group,outcome\n # for now, delete them\n for gTemplate in groupList:\n if len(omHash[oTemplate][s][gTemplate]) != 1:\n print abstract.id, '!!! Illegal number of outcome measurements:', len(omHash[oTemplate][s][gTemplate])\n print 'Outcome =', oTemplate.name\n print 'Group =', gTemplate.name \n for om in omHash[oTemplate][s][gTemplate]:\n om.display()\n omHash[oTemplate][s][gTemplate] = []\n \n for i in range(0, len(groupList)-1):\n gTemplate = groupList[i]\n if len(omHash[oTemplate][s][gTemplate]) > 0:\n # skip measurements for this group for now if there is more than one measurement for this group,outcome\n # NOTE: with current matching scheme, it should not be possible for a group to have more than *one*\n # measurement for an outcome. Others should have been discarded.\n # *However*, it is possible when using annotated info for associations (for ceiling analysis)\n om1 = omHash[oTemplate][s][gTemplate][0]\n for j in range(i+1, len(groupList)):\n gTemplate2 = groupList[j]\n if len(omHash[oTemplate][s][gTemplate2]) == 1:\n om2 = omHash[oTemplate][s][gTemplate2][0]\n ssTemplate = SummaryStat(om1, om2)\n self.stats.append(ssTemplate)\n om1.used = True\n om2.used = True \n \n # check for unused outcome measurements \n for gTemplate in groupList:\n if len(omHash[oTemplate][s][gTemplate]) == 1:\n om = omHash[oTemplate][s][gTemplate][0]\n if om.used == False:\n # we could not find a matching measurement, add to list of unused\n oTemplate.unusedNumbers.append(om)",
"def compare(img1, img2, choose_option, json_val=0):\r\n genp = None\r\n gend = None\r\n encoding1, gimage1, encoding2, gimage2 = scan(img1, img2)\r\n similarity = test(encoding1, encoding2)\r\n if choose_option == 3:\r\n genp = gender(gimage1)\r\n gend = gender(gimage2)\r\n elif choose_option == 1:\r\n gend = gender(gimage2)\r\n elif choose_option == 2:\r\n genp = gender(gimage1)\r\n boolean = similarity >= 70.0\r\n if json_val == 0:\r\n json_create(similarity, boolean, genp, gend)\r\n return(similarity, boolean, genp)",
"def compareImageAgainstAnotherImageGetScore_Features(img1, img2, flag_debug):\n\n # parameters\n filterMatchRatio = 0.75\n\n\n # create a detector and matcher object\n detector, matcher = createDetectorMatcher()\n\n # error if no descriptors were created for either image\n features1, descriptors1 = (detector.detectAndCompute(img1, None))\n if descriptors1 is None or not len(descriptors1):\n print \"No features in img1: %d\" % len(features1)\n return 0.0\n features2, descriptors2 = (detector.detectAndCompute(img2, None))\n if descriptors2 is None or not len(descriptors2):\n print \"No features in img2: %d.\" % len(features2)\n return 0.0\n\n # calc matches between features\n raw_matches = matcher.knnMatch(descriptors1, trainDescriptors=descriptors2, k=2)\n p1, p2, matching_feature_pairs = filterMatches(features1, features2, raw_matches, filterMatchRatio)\n\n # now that we have features lined up, we want to see if there is actually a nice homography transform (rotation, scale) that is consistent with bringing features into alignment.\n\n # numpy arrays and constants used below\n origin = numpy.array([0,0,1])\n dx = numpy.array([1,0,1])\n dy = numpy.array([0,1,1])\n\n # default returns\n match_count = 0\n scale_amount = float('Inf')\n \n # We need at least 4 points to align.\n if len(p1)>=4:\n homography_mat, inlier_pt_mask = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n if homography_mat is not None:\n match_count = numpy.sum(inlier_pt_mask)\n # Sometimes matching faces are visible but the die is rotated. That is,\n # this die has 5 on top but 19 visible to the side, and the other die\n # has 19 on top but 5 visible. OpenCV may find a match, but the match\n # will not be pure translation/rotation, and will distort scale.\n h = homography_mat\n scale_amount = sum([abs(1.0 - numpy.linalg.norm(h.dot(dv) - h.dot(origin))) for dv in (dx, dy)])\n if scale_amount < 1.0:\n scale_amount = (1.0 / scale_amount if scale_amount > 0 else float('Inf'))\n\n # we may want to test scale_amount and disallow the matches if holography alignment scale is too far from 1.0\n\n return match_count",
"def calculateScore(image1, image2):\n\timage1col = image1[-1]\n\timage2col = image2[0]\n\n\ttuples = zip(image1col, image2col)\n\n\tscore = 0\n\tfor pixel1, pixel2 in tuples:\n\t\tscore += comparePixels(pixel1, pixel2)\n\n\treturn score",
"def matchAnnotatedEventRates(self, annotatedStats):\n matchSets = {} \n outcomeList = self.abstract.entities.getList('outcome')\n for oTemplate in outcomeList:\n for om in oTemplate.unusedNumbers:\n matchSets[om] = []\n\n # check unused annotated numbers for matches\n for annotatedOutcome in self.abstract.annotatedEntities.getList('outcome'):\n for annotatedOM in annotatedOutcome.unusedNumbers:\n if om.matchAnnotatedMentions(annotatedOM):\n annotatedER = annotatedOM.eventRate()\n if annotatedER != None:\n dist = abs(om.eventRate() - annotatedER)\n heapq.heappush(matchSets[om], (dist, annotatedOM))\n if annotatedOM not in matchSets:\n matchSets[annotatedOM] = []\n heapq.heappush(matchSets[annotatedOM], (dist, om))\n # check unmatched annotated ARR stats for matches\n for aSS in annotatedStats.stats:\n if aSS.matchingStat == None:\n if om.matchAnnotatedMentions(aSS.lessEffective):\n annotatedOM = aSS.lessEffective\n elif om.matchAnnotatedMentions(aSS.moreEffective):\n annotatedOM = aSS.moreEffective\n else:\n annotatedOM = None\n if annotatedOM != None:\n dist = abs(om.eventRate() - annotatedOM.eventRate())\n heapq.heappush(matchSets[om], (dist, annotatedOM))\n if annotatedOM not in matchSets:\n matchSets[annotatedOM] = []\n heapq.heappush(matchSets[annotatedOM], (dist, om))\n \n for oTemplate in outcomeList:\n for om in oTemplate.unusedNumbers:\n if len(matchSets[om]) > 0:\n annotatedOM = matchSets[om][0][1]\n if matchSets[annotatedOM][0][1] == om:\n # the annotated and detected stats are the best matches for each other\n om.matchingOM = annotatedOM\n annotatedOM.matchingOM = om\n if matchSets[annotatedOM][0][0] < 0.001:\n om.correctlyMatched = True\n annotatedOM.correctlyMatched = True",
"def test_match_detection_one_image(self):\n for image in (VLIMAGE_ONE_FACE, VLIMAGE_SMALL):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detectOne = detector.detectOne(image=image, detect68Landmarks=True)\n batchDetect = detector.detect(images=[image] * 3, detect68Landmarks=True)\n for detection in batchDetect:\n for face in detection:\n assert face.boundingBox.asDict() == detectOne.boundingBox.asDict()\n assert face.landmarks5.asDict() == detectOne.landmarks5.asDict()\n assert face.landmarks68.asDict() == detectOne.landmarks68.asDict()",
"def PAV_detection( cov_per_gene1, cov_per_gene2, PAV_file, anno, cov_cutoff ):\n\t\n\tavg_cov1 = float( np.median( cov_per_gene1.values() ) )\n\tavg_cov2 = float( np.median( cov_per_gene2.values() ) )\n\t\n\t# --- calculate ratios --- #\n\t\n\tratios = {}\n\tnorm_cov_per_gene1 = {}\n\tnorm_cov_per_gene2 = {}\n\tfor key in cov_per_gene1.keys():\n\t\tcov1 = cov_per_gene1[ key ]\n\t\tcov2 = cov_per_gene2[ key ]\n\t\tif cov1+cov2 > cov_cutoff:\n\t\t\tif cov2 == 0:\n\t\t\t\tif cov1 == 0:\n\t\t\t\t\tratios.update( { key: 0 } )\n\t\t\t\telse:\n\t\t\t\t\tcov1 = cov1 / avg_cov1\n\t\t\t\t\tcov2 = 0.01 / avg_cov2\n\t\t\t\t\tratios.update( { key: np.log2( cov1 / cov2 ) } )\t\n\t\t\t\t\tnorm_cov_per_gene1.update( { key: cov1 } )\n\t\t\t\t\tnorm_cov_per_gene2.update( { key: cov2 } )\n\t\t\telse:\n\t\t\t\tif cov1 == 0:\n\t\t\t\t\tcov1 = 0.01 / avg_cov1\n\t\t\t\t\tcov2 = cov2 / avg_cov2\n\t\t\t\telse:\n\t\t\t\t\tcov1 = cov1 / avg_cov1\n\t\t\t\t\tcov2 = cov2 / avg_cov2\n\t\t\t\tratios.update( { key: np.log2( cov1 / cov2 ) } )\n\t\t\t\tnorm_cov_per_gene1.update( { key: cov1 } )\n\t\t\t\tnorm_cov_per_gene2.update( { key: cov2 } )\n\t\n\tmean = np.mean( ratios.values() )\n\tsd = np.std( ratios.values() )\n\t\n\t# --- calculate modified z-scores --- #\n\t\n\tmad = []\n\tratio_med = np.median( ratios.values() )\n\tfor gene in ratios.keys():\n\t\tif ratios[ gene ] > ratio_med:\n\t\t\tmad.append( ratios[ gene ] - ratio_med )\n\t\telse:\n\t\t\tmad.append( ratio_med - ratios[ gene ] )\n\tmad = np.median( mad )\n\tmod_z_scores = {}\n\tfor gene in ratios.keys():\n\t\tmod_z_scores.update( { gene: ( 0.6745 * ( ratios[ gene ] - ratio_med ) ) / mad } )\n\t\n\t# --- sort values and generate output file --- #\n\t\n\tdata_for_sorting = []\n\tfor key in ratios.keys():\n\t\tdata_for_sorting.append( { 'id': key, 'ratio': ratios[ key ], 'z': mod_z_scores[ key ], 'abs_z': abs( mod_z_scores[ key ] ) } )\n\tdata_for_sorting = sorted( data_for_sorting, key=itemgetter('abs_z') )[::-1]\n\t\n\twith open( PAV_file, \"w\" ) as out:\n\t\tout.write( \"GeneID\\tCovS1\\tCovS2\\tlog2(NormCov1/NormCov2)\\tMod_Z-score\\n\" )\n\t\tfor entry in data_for_sorting:\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\tout.write( \"\\t\".join( map( str, [ entry['id'], norm_cov_per_gene1[ entry['id'] ], norm_cov_per_gene2[ entry['id'] ], entry['ratio'], entry['z'], anno[ entry['id'] ] ] ) ) + '\\n' )\n\t\t\t\texcept KeyError:\n\t\t\t\t\tout.write( \"\\t\".join( map( str, [ entry['id'], norm_cov_per_gene1[ entry['id'] ], norm_cov_per_gene2[ entry['id'] ], entry['ratio'], entry['z'], \"n/a\" ] ) ) + '\\n' )\n\t\t\texcept KeyError:\n\t\t\t\tpass",
"def perform_comparison(self):\n # Turn the filenames into their absolute equivalents\n file1 = os.path.realpath(self.files[0])\n file2 = os.path.realpath(self.files[1])\n\n # Identify which of the two files is the KGO file\n if self.kgo is not None:\n kgo_file = [file1, file2][self.kgo]\n # If this file is missing, no comparison can be performed; it\n # could be that this task is brand new\n if not os.path.exists(kgo_file):\n self.parent.reporter(\n \"KGO File (file {0}) appears to be missing\"\n .format(self.kgo + 1), prefix=\"[FAIL] \")\n # Note that by exiting early this task counts as failed\n return\n\n # Load them using Mule - if either file doesn't appear to be\n # a recognised file type, this will abort... if it is recognised but\n # fails to validate, a warning will be raised and it may fail later.\n # However rose_ana will catch this and report to the user if needed.\n self.umf1 = mule.load_umfile(file1)\n self.umf2 = mule.load_umfile(file2)\n\n if self.prognostic_only:\n self.select_prognostic_fields()\n\n # Create the comparison object using Mule cumf\n self.compare = UMFileComparison(self.umf1, self.umf2)\n\n # If the comparison was successful, nothing more needs to be done\n if self.compare.match:\n self.passed = True\n # Capture the output from cumf's summary output and put into\n # the rose_ana output\n prefix = \"[INFO] \"\n self.write_output_info(prefix=prefix)\n else:\n # Capture the output from cumf's summary output and put into\n # the rose_ana output\n prefix = \"[FAIL] \"\n self.write_output_info(prefix=prefix)\n\n # Get a reference to the log directory\n log_root = os.path.dirname(os.environ[\"ROSE_TASK_LOG_ROOT\"])\n\n # Create a suitable filename for the cumf output using the\n # task name (so it'll be unique)\n basename = self.get_output_basename()\n\n # Write the full results of the cumf comparison\n self.write_full_output(log_root, basename)\n\n # Write a summary of the field differences\n self.write_summ_output(log_root, basename)",
"def image_detection(self, image):\n\n # start to detect image\n start_time = time.time()\n \n I = np.asarray(image.convert(\"RGB\"))\n I = I[:,:,::-1] # rbg2bgr\n\n detected_result_list = []\n _, _, detections = self.model.predict_on_batch(np.expand_dims(I, axis=0))\n predicted_labels = np.argmax(detections[0, :, 4:], axis=1)\n scores = detections[0, np.arange(detections.shape[1]), 4 + predicted_labels]\n\n for idx, (label_, score) in enumerate(zip(predicted_labels, scores)):\n if score < self.score:\n continue\n b = detections[0, idx, :4].astype(int)\n top, left, bottom, right = (b[1],b[0],b[3], b[2])\n top = int(max(0, np.floor(top + 0.5).astype('int32')))\n left = int(max(0, np.floor(left + 0.5).astype('int32')))\n bottom = int(min(image.size[1], np.floor(bottom + 0.5).astype('int32')))\n right = int(min(image.size[0], np.floor(right + 0.5).astype('int32')))\n print((left, top, right, bottom), self.idx2class[label_], score)\n detected_result_list.append(((left, top, right, bottom), self.idx2class[label_], score))\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return detected_result_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot an overlay of the detected events on the signal.
|
def plot_detection(self):
import matplotlib.pyplot as plt
import ipywidgets as ipy
# Define mask
sf = self._sf
win_size = 10
mask = self.get_mask()
highlight = self._data * mask
highlight = np.where(highlight == 0, np.nan, highlight)
highlight_filt = self._data_filt * mask
highlight_filt = np.where(highlight_filt == 0, np.nan, highlight_filt)
n_epochs = int((self._data.shape[-1] / sf) / win_size)
times = np.arange(self._data.shape[-1]) / sf
# Define xlim and xrange
xlim = [0, win_size]
xrng = np.arange(xlim[0] * sf, (xlim[1] * sf + 1), dtype=int)
# Plot
fig, ax = plt.subplots(figsize=(12, 4))
plt.plot(times[xrng], self._data[0, xrng], "k", lw=1)
plt.plot(times[xrng], highlight[0, xrng], "indianred")
plt.xlabel("Time (seconds)")
plt.ylabel("Amplitude (uV)")
fig.canvas.header_visible = False
fig.tight_layout()
# WIDGETS
layout = ipy.Layout(width="50%", justify_content="center", align_items="center")
sl_ep = ipy.IntSlider(
min=0,
max=n_epochs,
step=1,
value=0,
layout=layout,
description="Epoch:",
)
sl_amp = ipy.IntSlider(
min=25,
max=500,
step=25,
value=150,
layout=layout,
orientation="horizontal",
description="Amplitude:",
)
dd_ch = ipy.Dropdown(
options=self._ch_names, value=self._ch_names[0], description="Channel:"
)
dd_win = ipy.Dropdown(
options=[1, 5, 10, 30, 60],
value=win_size,
description="Window size:",
)
dd_check = ipy.Checkbox(
value=False,
description="Filtered",
)
def update(epoch, amplitude, channel, win_size, filt):
"""Update plot."""
n_epochs = int((self._data.shape[-1] / sf) / win_size)
sl_ep.max = n_epochs
xlim = [epoch * win_size, (epoch + 1) * win_size]
xrng = np.arange(xlim[0] * sf, (xlim[1] * sf), dtype=int)
# Check if filtered
data = self._data if not filt else self._data_filt
overlay = highlight if not filt else highlight_filt
try:
ax.lines[0].set_data(times[xrng], data[dd_ch.index, xrng])
ax.lines[1].set_data(times[xrng], overlay[dd_ch.index, xrng])
ax.set_xlim(xlim)
except IndexError:
pass
ax.set_ylim([-amplitude, amplitude])
return ipy.interact(
update, epoch=sl_ep, amplitude=sl_amp, channel=dd_ch, win_size=dd_win, filt=dd_check
)
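
The interactive plot above relies on matplotlib and ipywidgets redrawing the figure in place, so it is meant for a Jupyter notebook; a hypothetical way to exercise it, with the detection setup assumed as before:

# Run in a Jupyter notebook with an interactive backend, e.g. ipympl:
# %matplotlib widget
import numpy as np
import yasa  # assumed dependency

sf = 100.0
data = np.random.default_rng(0).normal(size=(1, int(5 * 60 * sf)))  # 1 channel of toy data

sp = yasa.spindles_detect(data, sf, ch_names=["C3"])  # may return None on pure noise
if sp is not None:
    # Opens the epoch / amplitude / channel / window-size / filtered widgets defined above.
    sp.plot_detection()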
|
[
"def plot_events(obj):\n obj.ax.scatter(obj.event_times[obj.current_position], obj.event_values[obj.current_position],marker='.')\n obj.last_position = len(obj.event_values) - 1",
"def plot_prediction_overlay(tile: np.ndarray, prediction: np.ndarray):\n plt.figure()\n plt.imshow(tile)\n plt.show()",
"def plotSpikes(self):\n self.getCompleteSpikeTimes()\n b=np.ones_like(self.completeSpikeTimes)\n matplotlib.pyplot.plot(b)\n matplotlib.pyplot.eventplot(self.spikeTimes)\n matplotlib.pyplot.xlabel(\"time\") \n matplotlib.pyplot.title(\"single neuron raster plot of Neuron \"+self.name)\n matplotlib.pyplot.show()",
"def drawOverlays(self):\r\n\t\tpass",
"def plot(self, signals):\n fig = plt.figure()\n for i in range(len(signals)):\n ax = fig.add_subplot(len(signals), 1, i + 1)\n ax.plot(signals[i])\n plt.show()",
"def visualize(self):\n plt.show()",
"def on_plot_hover(event):\n # Iterating over each data member plotted\n for robot_id, line in enumerate(line_list):\n # Searching which data member corresponds to current mouse position\n if line.contains(event)[0]:\n line.set_alpha(1.0)\n robot_id_text.set_text(f\"Robot {robot_id}\")\n else:\n line.set_alpha(default_line_alpha)",
"def line_chart():\n images = ['image1', 'image2', 'image3', 'image4', 'image5', 'image6', 'image7', 'image8', 'image9', 'image10',\n 'image11', 'image12', 'image13', 'image14', 'image15', 'image16', 'image17', 'image18', 'image19',\n 'image20', 'image21', 'image22', 'image23', 'image24', 'image25']\n true_positives_before = np.array([1,1,2,0,5,3,10,1,1,1,1,0,0,3,0,2,0,1,1,2,3,2,1,0,0])\n true_positives_after = np.array([2,1,2,1,5,3,10,1,1,1,1,0,0,4,2,4,2,2,1,2,3,2,1,0,0])\n plt.plot(images, true_positives_before, color='red', marker='o', label='Before Augmentation')\n plt.plot(images, true_positives_after, color='blue', marker='o', label='After Augmentation')\n plt.title('Comparison between objects detected before and after augmentation', fontsize=14)\n plt.xlabel('Image name', fontsize=14)\n plt.ylabel('Number of objects detected correctly', fontsize=14)\n plt.legend(loc='best')\n plt.xticks(rotation=45)\n plt.grid(True)\n plt.show()",
"def plotting(eventfile,segment_length,demod,tbin,threshold,PI1,PI2,t1,t2,starting_freq,W,hist_min_sig,N,xlims,plot_mode):\n if demod != True and demod != False:\n raise ValueError(\"demod should either be True or False!\")\n if plot_mode != \"show\" and plot_mode != \"save\":\n raise ValueError(\"plot_mode should either be 'show' or 'save'!\")\n\n parent_folder = str(pathlib.Path(eventfile).parent)\n\n f,ps,ps_bins,N_greaterthanP,M = average_ps(eventfile,segment_length,demod,tbin,threshold,PI1,PI2,t1,t2,starting_freq,W)\n\n power_required_3 = Lv3_detection_level.power_for_sigma(3,N,M,W) #power required for significance\n power_required_4 = Lv3_detection_level.power_for_sigma(4,N,M,W) #power required for significance\n\n ### to create the histogram of pulsation candidates\n ps_sig = Lv3_detection_level.signal_significance(N,M,W,ps)\n\n if PI1 == '':\n output_file = open(parent_folder + '/S' + str(segment_length) + '_W' + str(W) + '_T' + str(threshold) + '_t1t2_' + str(t1) + '-' + str(t2) + '.txt','w')\n else:\n output_file = open(parent_folder + '/S' + str(segment_length) + '_W' + str(W) + '_T' + str(threshold) + '_E' + str(PI1) + '-' + str(PI2) + '_t1t2_' + str(t1) + '-' + str(t2) + '.txt','w')\n cand_f = f[ps_sig>=hist_min_sig] #decided not to use hist_min_f ; otherwise I get empty files...\n cand_ps = ps_sig[ps_sig>=hist_min_sig]\n for i in range(len(cand_f)):\n output_file.write(str(cand_f[i]) + ' ' + str(cand_ps[i]) + '\\n')\n output_file.close()\n\n plt.figure(num=1,figsize=(10,5.63))\n plt.errorbar(x=f,y=ps,color='r',drawstyle='steps-mid')\n plt.axhline(y=power_required_3,lw=0.8,alpha=0.5,color='b')\n plt.axhline(y=power_required_4,lw=0.8,alpha=0.5,color='k')\n plt.axhline(y=2,lw=0.8,alpha=0.5,color='k',linestyle='--')\n plt.xlabel('Frequency (Hz)',fontsize=12)\n plt.ylabel('Leahy-normalized power',fontsize=12)\n plt.xscale('log')\n plt.yscale('log')\n plt.ylim([1,min(20.0,3*power_required_4)])\n plt.xlim([0.001,1/(2*tbin)])\n if len(xlims) != 0:\n plt.xlim([xlims[0],xlims[1]])\n #plt.axvline(x=271.453,lw=0.5,alpha=0.5)\n plt.title('PI: ' + str(PI1)+'-'+str(PI2) + '; W = ' + str(W) + ', Threshold = ' + str(threshold) + '%' + '\\n' + 't1 = ' + str(t1) + ', t2 = ' + str(t2) + ' ; Segment Length: ' + str(segment_length) + 's, No. 
Segments = ' + str(M) + '\\n' + 'Demodulated: ' + str(demod) + ' ; St.D = ' + str(np.std(ps)), fontsize=12)\n plt.legend(('Power Spectrum','3 sigma','4 sigma','Poisson noise'),loc='best')\n if plot_mode == \"save\":\n if PI1 != '':\n energy_suffix = '_E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4)\n else:\n energy_suffix = ''\n if demod == True:\n demod_suffix = '_demod'\n else:\n demod_suffix = ''\n plt.savefig(parent_folder + '/' + str(segment_length) + 's_average_ps_W' + str(W) + '_T' + str(threshold) + demod_suffix + energy_suffix + '_t1t2_' + str(t1) + '-' + str(t2) + '.pdf',dpi=900)\n plt.close()\n\n plt.figure(2)\n plt.semilogy(ps_bins,N_greaterthanP,'rx')\n plt.xlabel('Leahy-normalized power',fontsize=12)\n plt.ylabel('log[N(>P)]',fontsize=12)\n plt.title('Energy range: ' + str(PI1) + ' - ' + str(PI2) + ', W = ' + str(W),fontsize=12)\n if plot_mode == \"save\":\n if PI1 != '':\n energy_suffix = '_E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4)\n else:\n energy_suffix = ''\n if demod == True:\n demod_suffix = '_demod'\n else:\n demod_suffix = ''\n plt.savefig(parent_folder + '/' + str(segment_length) + 's_noise_hist_W' + str(W) + '_T' + str(threshold) + demod_suffix + energy_suffix + '_t1t2_' + str(t1) + '-' + str(t2) + '.pdf',dpi=900)\n plt.close()\n\n if plot_mode == \"show\":\n plt.show()",
"def overlayImageBeads(movie_name, beads_locs_name, frame_number, sx = 8, sy = 8):\n \n frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(numpy.float64)\n frame = frame - numpy.min(frame)\n frame = frame/numpy.max(frame)\n\n bead_locs = numpy.loadtxt(beads_locs_name)\n locs = {\"x\" : bead_locs[:,0],\n \"y\" : bead_locs[:,1]}\n \n fig = pyplot.figure(figsize = (sx, sy))\n ax = fig.add_subplot(1,1,1)\n ax.imshow(frame, interpolation = 'nearest', cmap = \"gray\")\n for i in range(locs[\"x\"].size):\n width = 10\n height = 10\n ellipse = patches.Ellipse((locs[\"x\"][i], locs[\"y\"][i]), width, height, facecolor='none', edgecolor='g', linewidth = 2)\n ax.add_artist(ellipse)\n \n #ax.scatter(locs[\"x\"], locs[\"y\"], s = 200,\n ax.set_title(\"Overlay Image\")\n\n pyplot.show()",
"def draw_area(self, events):\r\n self.current_points_list.append((events.x, events.y))\r\n\r\n self.create_oval(events.x - 1, events.y - 1, events.x + 1, events.y + 1, fill=\"yellow\", tags='indicator')",
"def plot_data(self):\n #TODO: implement time axis scale\n plt.title(\"Event #{} voltage\".format(self._event_number))\n plt.xlabel(\"time [ns]\")\n plt.ylabel(\"voltage [V]\")\n plt.plot(self._raw_data)\n plt.show()",
"def plot_mask(self):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(self.mask)",
"def ShowSpots(image,spot_mask):\n fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (20,10))\n axes[0].imshow(image, cmap = 'gray')\n axes[1].imshow(image, cmap = 'gray')\n axes[1].imshow(np.ma.array(spot_mask, mask = spot_mask==0), \n cmap = 'flag', alpha = 0.5)\n axes[0].title.set_text('original image')\n axes[1].title.set_text('overlay spots')\n plt.tight_layout()\n plt.show()\n return",
"def hover(fig, scatter, annot, ax, event, norm, cmap,\n labels, mesh_ids, classes):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = scatter.contains(event)\n if cont:\n update_annot(scatter, annot, ind, norm,\n cmap, labels, mesh_ids, classes)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()",
"def _plot(self, **kwargs): # pragma: no cover\n if self.wrapper.ndim > 1:\n raise TypeError(\"Select a column first. Use indexing.\")\n\n return self.close.rename('close').vbt.overlay_with_heatmap(self.labels.rename('labels'), **kwargs)",
"def visualize(X, Y):\n plt.plot(X, Y, \"bx\")\n plt.show()",
"def plot_data(self, figsize):\n fig, ax = plt.subplots(1, figsize=figsize)\n ax.set_title(self.filename, wrap=True)\n image = self.waterfall_image\n if self.normalize_image:\n image = utils.normalize_waterfall_image(image)\n ax.imshow(image)\n highlight, = ax.plot([], [], 'o', color=self.highlight_color, ms=5)\n return fig, ax, highlight",
"def anomaly_plot(self, title='Figure'):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a summary of the spindles detection, optionally grouped across channels and/or stage.
|
def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc="mean", sort=True):
return super().summary(
event_type="spindles",
grp_chan=grp_chan,
grp_stage=grp_stage,
aggfunc=aggfunc,
sort=sort,
mask=mask,
)
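
A hedged sketch of how the summary wrapper above is typically called, again assuming the same illustrative detection setup rather than the library's documented example:

import numpy as np
import yasa  # assumed dependency

sf = 100.0
data = np.random.default_rng(1).normal(size=(2, int(10 * 60 * sf)))
sp = yasa.spindles_detect(data, sf, ch_names=["C3", "C4"])  # may return None on pure noise

if sp is not None:
    events = sp.summary()                 # one row per detected spindle
    per_chan = sp.summary(grp_chan=True)  # mean event parameters per channel
    # grp_stage=True additionally requires a hypnogram passed to the detection call, e.g.:
    # per_chan_stage = sp.summary(grp_chan=True, grp_stage=True, aggfunc="median")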
|
[
"def add_pruning_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n for mask, threshold in zip(masks, thresholds):\n if not self._exists_in_do_not_prune_list(mask.name):\n tf.summary.scalar(mask.op.name + '/sparsity',\n tf.nn.zero_fraction(mask))\n tf.summary.scalar(threshold.op.name + '/threshold', threshold)",
"def _analyze_sparse_noise_frames(sn_grp):\n\n if sn_grp['stim_name'].value != 'SparseNoise':\n raise NameError('The input stimulus should be \"SparseNoise\".')\n\n frames = sn_grp['data'].value\n frames = [tuple(x) for x in frames]\n dtype = [('isDisplay', int), ('azimuth', float), ('altitude', float), ('sign', int), ('isOnset', int)]\n frames = np.array(frames, dtype=dtype)\n\n all_squares = []\n for i in range(len(frames)):\n if frames[i]['isDisplay'] == 1 and \\\n (i == 0 or (frames[i - 1]['isOnset'] == -1 and frames[i]['isOnset'] == 1)):\n all_squares.append(np.array((i, frames[i]['azimuth'], frames[i]['altitude'], frames[i]['sign']),\n dtype=np.float32))\n\n all_squares = np.array(all_squares)\n\n pooled_squares = {}\n unique_squares = list(set([tuple(x[1:]) for x in all_squares]))\n for i, unique_square in enumerate(unique_squares):\n curr_square_n = 'square_' + ft.int2str(i, 5)\n curr_azi = unique_square[0]\n curr_alt = unique_square[1]\n curr_sign = unique_square[2]\n curr_onset_ind = []\n for j, give_square in enumerate(all_squares):\n if np.array_equal(give_square[1:], unique_square):\n curr_onset_ind.append(j)\n pooled_squares.update({curr_square_n: {'azi': curr_azi,\n 'alt': curr_alt,\n 'sign': curr_sign,\n 'onset_ind': curr_onset_ind}})\n all_squares = np.array(all_squares)\n data_format = ['display frame indices for the onset of each square', 'azimuth of each square',\n 'altitude of each square', 'sign of each square']\n description = 'TimeSeries of sparse noise square onsets. Stimulus generated by ' \\\n 'corticalmapping.VisualStim.SparseNoise class.'\n return all_squares, data_format, description, pooled_squares",
"def mask_sparsity_summaries(masks_list, mask_op_names):\n with tf.name_scope('sparsity'):\n total_sparsity, total_nnz, mask_sps = calculate_mask_sparsities(masks_list, mask_op_names)\n for sps in mask_sps:\n tf.summary.scalar(*sps)\n tf.summary.scalar('total_nnz', total_nnz)\n tf.summary.scalar('total_sparsity', total_sparsity)\n return total_sparsity",
"def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc=\"mean\", sort=True):\n return super().summary(\n event_type=\"sw\",\n grp_chan=grp_chan,\n grp_stage=grp_stage,\n aggfunc=aggfunc,\n sort=sort,\n mask=mask,\n )",
"def count_sparsity(self):\n w = self.layers[0].weight.cpu().detach().numpy()\n\n num_sparse_features = 0\n num_sparse_groups = 0\n \n \n for group_id in self.group_idx.keys():\n curr_group_is_sparse = True\n \n for feature_id in self.group_idx[group_id]:\n curr_feature_is_sparse = True\n for parameter in w[:, feature_id]:\n if abs(parameter) > TOL:\n curr_feature_is_sparse = False\n break\n \n if curr_feature_is_sparse:\n num_sparse_features += 1\n else:\n curr_group_is_sparse = False\n break\n \n # Make sure the group is sparse here\n if curr_group_is_sparse:\n num_sparse_groups += 1\n self.set_group_to_sparse(group_id)\n \n return num_sparse_features, num_sparse_groups",
"def make_fs_summary(selector_pipe):\n tag_df = pd.DataFrame({\"predictor\": selector_pipe[0].feature_names_in_})\n for selector_name, selector in selector_pipe.named_steps.items():\n if hasattr(selector, \"support_\"):\n feature_in = selector.feature_names_in_\n to_drop = list(set(feature_in) - set(selector.get_feature_names_out()))\n tag_df[selector_name] = np.where(\n tag_df[\"predictor\"].isin(to_drop), 0, 1\n ) * np.where(tag_df[\"predictor\"].isin(feature_in), 1, np.nan)\n else:\n tag_df[selector_name] = np.nan\n\n style = (\n tag_df.style.apply(highlight_discarded, subset=tag_df.columns[1:])\n .applymap(lambda x: \"\" if x == x else \"background-color: #f57505\")\n .format(precision=0)\n )\n\n return style",
"def _groupsInfo(self, nbFeatures):\n nbGroups = len(self.getFilters())*len(self._convolExtractor.getPoolers())\n if self.isImageIncluded():\n nbGroups += len(self._convolExtractor.getPoolers())\n nbFeaturePerGroup = nbFeatures // nbGroups\n return nbFeatures, nbGroups, nbFeaturePerGroup",
"def ssd_process_frame(img, select_threshold=0.5, nms_threshold=0.2, overlap_threshold=0.4, smoothing=0.25):\n # Resize image to height 300.\n factor = 300. / float(img.shape[0])\n img = cv2.resize(img, (0, 0), fx=factor, fy=factor)\n # Run SSD network and get class prediction and localization.\n rpredictions, rlocalisations = isess.run([predictions, localisations], feed_dict={img_input: img})\n\n # Get anchor boxes for this image shape.\n ssd.update_feature_shapes(rpredictions)\n anchors = ssd.anchors(img.shape, dtype=np.float32)\n\n # Compute classes and bboxes from the net outputs: decode SSD output.\n rclasses, rscores, rbboxes, rlayers, ridxes = ssd_common.ssd_bboxes_select(\n rpredictions, rlocalisations, anchors,\n threshold=select_threshold, img_shape=img.shape, num_classes=ssd.params.num_classes, decode=True)\n\n # Remove other classes than cars.\n idxes = (rclasses == 1)\n rclasses = rclasses[idxes]\n rscores = rscores[idxes]\n rbboxes = rbboxes[idxes]\n # Sort boxes by score.\n rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes,\n top_k=400, priority_inside=True, margin=0.0)\n # Apply NMS.\n rclasses, rscores, rbboxes = bboxes_nms_intersection_avg(rclasses, rscores, rbboxes,\n threshold=nms_threshold)\n # Update cars collection.\n n_frames = 15\n ssd_process_frame.cars = update_car_collection(ssd_process_frame.cars, rscores, rbboxes,\n overlap_threshold, smoothing, n_frames=n_frames)\n\n # Draw bboxes\n cbboxes = [c.bbox for c in ssd_process_frame.cars if c.n_frames > n_frames - 5]\n cindexes = [c.idx for c in ssd_process_frame.cars if c.n_frames > n_frames - 5]\n if len(cbboxes):\n cbboxes = np.stack(cbboxes)\n cindexes = np.stack(cindexes)\n bboxes_draw_on_img(img, cindexes, cbboxes, colors_tableau, thickness=2)\n return img",
"def make_catalog(datas, lvl=4, wave=True, segmentation_map=False, maskthresh=10.0, object_limit=100000):\n \n if type(datas) is np.ndarray:\n hr_images = datas / np.sum(datas, axis=(1, 2))[:, None, None]\n # Detection image as the sum over all images\n detect_image = np.sum(hr_images, axis=0)\n else:\n data_lr, data_hr = datas\n # Create observations for each image\n # Interpolate low resolution to high resolution\n interp = interpolate(data_lr, data_hr)\n # Normalization of the interpolate low res images\n interp = interp / np.sum(interp, axis=(1, 2))[:, None, None]\n # Normalisation of the high res data\n hr_images = data_hr.images / np.sum(data_hr.images, axis=(1, 2))[:, None, None]\n # Detection image as the sum over all images\n detect_image = np.sum(interp, axis=0) + np.sum(hr_images, axis=0)\n detect_image *= np.sum(data_hr.images)\n if np.size(detect_image.shape) == 4:\n if wave:\n # Wavelet detection in the first three levels\n wave_detect = Starlet(detect_image.mean(axis=0), lvl=5).coefficients\n wave_detect[:, -1, :, :] = 0\n detect = scarlet.Starlet(coefficients=wave_detect).image\n else:\n # Direct detection\n detect = detect_image.mean(axis=0)\n else:\n if wave:\n wave_detect = scarlet.Starlet(detect_image).coefficients\n detect = wave_detect[0][0] + wave_detect[0][1] + wave_detect[0][2]\n else:\n detect = detect_image\n \n bkg = sep.Background(detect)\n # Set the limit on the number of sub-objects when deblending.\n sep.set_sub_object_limit(object_limit)\n \n # Extract detection catalog with segmentation maps!\n # Can use this to retrieve ellipse params\n catalog = sep.extract(detect, lvl, err=bkg.globalrms, segmentation_map=segmentation_map, maskthresh=maskthresh)\n \n # Estimate background\n if type(datas) is np.ndarray:\n bkg_rms = scarlet.wavelet.mad_wavelet(datas)\n else:\n bkg_rms = []\n for data in datas:\n bkg_rms.append(scarlet.wavelet.mad_wavelet(data.images))\n \n return catalog, bkg_rms",
"def dump_ball_counts(self) -> None:\n self.info_log(\"Known balls: %s\", self.num_balls_known)\n for device in self.machine.ball_devices.values():\n self.info_log(\"%s contains %s balls. Tags %s\", device.name, device.balls, device.tags)",
"def board_summary(self):\n return self._call_summary(GxFpga.GxFpgaGetBoardSummary)",
"def pipeline(self, img):\n\n self.frame_count += 1\n\n unit_detections = self.detector.get_detections(img) # measurement\n\n unit_trackers = []\n\n for trk in self.tracker_list:\n unit_trackers.append(trk.unit_object)\n\n matched, unmatched_dets, unmatched_trks = self.assign_detections_to_trackers(unit_trackers, unit_detections,\n iou_thrd=0.3)\n\n LOGGER.debug('Detection: ' + str(unit_detections))\n LOGGER.debug('x_box: ' + str(unit_trackers))\n LOGGER.debug('matched:' + str(matched))\n LOGGER.debug('unmatched_det:' + str(unmatched_dets))\n LOGGER.debug('unmatched_trks:' + str(unmatched_trks))\n\n # Matched Detections\n for trk_idx, det_idx in matched:\n z = unit_detections[det_idx].box\n z = np.expand_dims(z, axis=0).T\n tmp_trk = self.tracker_list[trk_idx]\n tmp_trk.predict_and_update(z)\n xx = tmp_trk.x_state.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n unit_trackers[trk_idx].box = xx\n unit_trackers[trk_idx].class_id = unit_detections[det_idx].class_id\n tmp_trk.unit_object = unit_trackers[trk_idx]\n tmp_trk.hits += 1\n tmp_trk.no_losses = 0\n\n # Unmatched Detections\n for idx in unmatched_dets:\n z = unit_detections[idx].box\n z = np.expand_dims(z, axis=0).T\n tmp_trk = self.tracker() # Create a new tracker\n x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T\n tmp_trk.x_state = x\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.unit_object.box = xx\n tmp_trk.unit_object.class_id = unit_detections[idx].class_id\n tmp_trk.tracking_id = self.track_id_list.popleft() # assign an ID for the tracker\n self.tracker_list.append(tmp_trk)\n unit_trackers.append(tmp_trk.unit_object)\n\n # Unmatched trackers\n for trk_idx in unmatched_trks:\n tmp_trk = self.tracker_list[trk_idx]\n tmp_trk.no_losses += 1\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.unit_object.box = xx\n unit_trackers[trk_idx] = tmp_trk.unit_object\n\n # The list of tracks to be annotated\n good_tracker_list = []\n for trk in self.tracker_list:\n if (trk.hits >= self.min_hits) and (trk.no_losses <= self.max_age):\n good_tracker_list.append(trk)\n img = utils.drawing.draw_box_label(img, trk, self.detector.class_names)\n\n # Manage Tracks to be deleted\n deleted_tracks = filter(lambda x: x.no_losses > self.max_age, self.tracker_list)\n\n for trk in deleted_tracks:\n self.track_id_list.append(trk.tracking_id)\n\n self.tracker_list = [x for x in self.tracker_list if x.no_losses <= self.max_age]\n\n return img",
"def summary(self, grp_stage=False, mask=None, aggfunc=\"mean\", sort=True):\n # ``grp_chan`` is always False for REM detection because the\n # REMs are always detected on a combination of LOC and ROC.\n return super().summary(\n event_type=\"rem\",\n grp_chan=False,\n grp_stage=grp_stage,\n aggfunc=aggfunc,\n sort=sort,\n mask=mask,\n )",
"def get_features(self, detection):\n features = np.zeros((1, 0), dtype=np.float32)\n if 'bdif' in self.label_config[\"features\"]:\n features = np.append(features, detection.bbox)\n features = np.append(features, detection.bbox)\n if 'bbox' in self.label_config[\"features\"]:\n features = np.append(features, detection.bbox)\n if 'brel' in self.label_config[\"features\"]:\n cam_id, _ = self.data_provider.cam_and_time(detection)\n imsize = self.data_provider.cam_size[cam_id]\n tmp = detection.bbox\n tmp[0, 0] /= imsize[0]\n tmp[0, 2] /= imsize[0]\n tmp[0, 1] /= imsize[1]\n tmp[0, 3] /= imsize[1]\n features = np.append(features, tmp)\n if 'conf' in self.label_config[\"features\"]:\n features = np.append(features, detection.confidence)\n\n social_cnt = 0\n if 'soc1' in self.label_config[\"features\"]:\n social_cnt = 1\n if 'soc3' in self.label_config[\"features\"]:\n social_cnt = 3\n if 'soc5' in self.label_config[\"features\"]:\n social_cnt = 5\n\n dens = np.zeros((1, 3), dtype=np.flot32)\n\n if social_cnt > 0 or 'dens' in self.label_config[\"features\"]:\n if detection not in self.social.keys():\n self.social[detection] = np.zeros((1, 3 * social_cnt))\n if detection.time not in self.det_by_time.keys():\n pass\n else:\n neighbours = np.asarray(\n self.det_by_time[detection.time])\n if len(neighbours) == 0:\n pass\n else:\n dx = neighbours[:, 0] - \\\n detection.bbox[0, 0] - \\\n detection.bbox[0, 2] * 0.5\n dy = neighbours[:, 1] - \\\n detection.bbox[0, 1] - \\\n detection.bbox[0, 3] * 0.5\n dd = dx**2 + dy**2\n if 'dens' in self.label_config[\"features\"]:\n dds = sorted(list(dd.reshape((-1,))))\n if len(dds) < 20:\n dds += [0] * (20 - len(dds))\n dens[0, 0] = dds[0]\n dens[0, 1] = dds[4]\n dens[0, 2] = dds[19]\n\n for rep in range(min(len(neighbours), social_cnt)):\n who = np.argmin(dd)\n self.social[detection][0, 3*rep:3*rep+3] =\\\n np.asarray([dx[who],\n dy[who],\n neighbours[who, -1]])\n dd[who] = 1e10\n\n features = np.append(features, self.social[detection])\n\n if 'dens' in self.label_config[\"features\"]:\n features = np.append(features, dens)\n\n if 'intp' in self.label_config[\"features\"]:\n if not hasattr(detection, \"interpolated\"):\n detection.interpolated = False\n features = np.append(features,\n np.asarray([detection.interpolated]))\n if 'appr' in self.label_config[\"features\"]:\n features = np.append(features, self.app_feat(detection))\n\n detection.features = features",
"def summaries(self):\n with tf.name_scope('summaries'), tf.device('/cpu:0'):\n # Define summaries\n tml = tf.summary.scalar(name='training_loss', tensor=self.loss_avg)\n vml = tf.summary.scalar(name='validation_loss', tensor=self.loss_avg)\n lreg = tf.summary.scalar(name='l2_regularized_loss', tensor=self.loss_regularized)\n lr = tf.summary.scalar(name='learning_rate', tensor=self.lr)\n train_list = [tml, lreg, lr]\n valid_list = [vml]\n train_summaries = tf.summary.merge(\n self.encoder_a.summary_list + self.encoder_b.summary_list + train_list, name='train_summaries')\n valid_summaries = tf.summary.merge(valid_list, name='valid_summaries')\n return train_summaries, valid_summaries",
"def sense(self):\n\n out = {}\n\n # retrieve depth data\n result, resolution, data = vrep.simxGetVisionSensorDepthBuffer(self._clientID,\n self.sensors_handles['kinect_depth'],\n self._operation_mode)\n if result != vrep.simx_return_ok: # checking the reading result.\n exit(result)\n\n # get clean depth data\n out['depth'] = self.get_depth(data) # appending the distance depth.\n\n # retrieve vision sensor image\n result_vision, resolution, image = vrep.simxGetVisionSensorImage(self._clientID,\n self.sensors_handles['kinect_rgb'],\n 0,\n vrep.simx_opmode_blocking)\n # retrieve vision sensor filtered image (blob)\n result_blob, t0, t1 = vrep.simxReadVisionSensor(self._clientID,\n self.sensors_handles['kinect_rgb'],\n vrep.simx_opmode_blocking)\n\n # extract blob data\n out['vision'] = self.get_vision(resolution, image, t1)\n\n # get load status\n out['load'] = self._load\n\n self._term.write(\"sensed: {}\".format(out))\n\n return out",
"def list_detectors(self, mosaic=False):\n return None",
"def Summarize(self):\n top = Toplevel()\n top.title('SPEX::PREVIEW')\n top.geometry(\"400x300\")\n frameSummarize = LabelFrame(top, relief=RAISED, borderwidth=2)\n frameSummarize.pack(side=TOP, expand=True)\n\n # textSummarize = ['Spectrum or Image File Summary: ', 'Data Type: ', 'Time Bins:', 'Time range:', '#Energy Bins: ',\n # 'Area: ', 'Detectors Used: ', 'Response Info: ']\n # for each section call the parameters from header and data\n txt = [\"\\n\\n\\nSpectrum or Image File Summary\",\n \"\\nData Type: \", self.summarizeData[2], \n \"\\nFile name: \", self.name,\n \"\\n#Time Bins: \", self.time_len, \"Time range: \", self.timeData[0], 'to', self.timeData[1],\n \"\\n#Energy Bins: \", len(self.summarizeData[4]),\n \"Energy range: \", self.summarizeData[5], 'to', self.summarizeData[6],\n \"\\nArea: \", self.summarizeData[0],\n \"\\nDetectors Used: \", self.summarizeData[1],\n \"\\nResponse Info: \", self.name]\n list = Text(frameSummarize)\n list.insert(END, txt)\n list.pack()",
"def get_flop_stats(model, cfg, is_train):\n rgb_dimension = 1\n # if is_train:\n # input_tensors = torch.rand(\n # rgb_dimension,\n # cfg.DATA.NUM_FRAMES,\n # cfg.DATA.TRAIN_CROP_SIZE,\n # cfg.DATA.TRAIN_CROP_SIZE,\n # )\n # else:\n # input_tensors = torch.rand(\n # rgb_dimension,\n # cfg.DATA.NUM_FRAMES,\n # cfg.DATA.TEST_CROP_SIZE,\n # cfg.DATA.TEST_CROP_SIZE,\n # )\n input_tensors=torch.rand(\n 1,16,192,128\n )\n flop_inputs = input_tensors\n for i in range(len(flop_inputs)):\n flop_inputs[i] = flop_inputs[i].unsqueeze(0).cuda(non_blocking=True)\n\n # If detection is enabled, count flops for one proposal.\n\n inputs = (flop_inputs,)\n\n gflop_dict, _ = flop_count(model, inputs)\n gflops = sum(gflop_dict.values())\n return gflops"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the (scaled) coincidence matrix.
|
def get_coincidence_matrix(self, scaled=True):
return super().get_coincidence_matrix(scaled=scaled)
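A brief usage sketch, assuming sp is a multi-channel detection-results object exposing this method (for instance the object from the spindle sketch above): the coincidence matrix is a channel-by-channel table describing how often detected events co-occur across channels, returned either scaled or as raw counts.

coinc = sp.get_coincidence_matrix()               # scaled version (default)
counts = sp.get_coincidence_matrix(scaled=False)  # raw co-occurrence counts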
|
[
"def influence_matrix(self) -> np.ndarray:",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def generate_cnk_matrix(self):\r\n total = self.rator_number\r\n cnk_matrix = np.zeros((total - 1, total))\r\n\r\n for column in range(total):\r\n cnk_matrix[:, column] = np.concatenate((np.where(self.combination_list[:, 0] == column)[0],\r\n np.where(self.combination_list[:, 1] == column)[0]))\r\n\r\n return cnk_matrix.astype(int)",
"def cofactor_matrix(self):\n return Matrix(self.row_n, self.col_n, [self.cofactor(i, j) for i in range(1, self.row_n + 1) for j in range(1, self.col_n + 1)])",
"def _matrix_(self):\n return self.to_matrix()",
"def incidence_matrix(labels):\n Npts = len(labels)\n incidence_matrix = np.zeros((Npts,Npts))\n for i in range(Npts):\n for j in range(Npts):\n if labels[i] == labels[j]:\n incidence_matrix[i][j] = 1\n else:\n incidence_matrix[i][j] = 0\n return(incidence_matrix)",
"def contact_matrix(self):\t\t\n\t\tJp = self.J.view(self.L, self.L)\n\t\tS_FN = torch.sqrt(Jp*Jp)\n\n\t\treturn S_FN.data",
"def get_conformers_distmatrix (self) :\n ncomformers = len(self.geometries)\n for i,(en,b) in enumerate(zip(self.energies,self.bconsts)) :\n for j in range(i+1,len(self.geometries)) :\n dist = self.get_overlap_with_conformer(j,en,b)\n dist_matrix[i,j] = dist\n # Symmetrize\n dist_matrix = dist_matrix + dist_matrix.transpose()\n return dist_matrix",
"def get_pairwise_chi_matrix(self):\n\n\t\treturn self._pairwise_chi_matrix",
"def get_full_matrix(correlations):\n n = correlations.shape[1]\n matrix = np.zeros((n,n), dtype=np.uint8)\n for i in range(n):\n for j in range(correlations.shape[0]):\n if correlations[j,i] == 1:\n col = i+j+1\n if col < n and col >= 0:\n matrix[i,col] = 1\n matrix[col,i] = 1\n return matrix",
"def calc_overlap_matrix(self, points=None):",
"def get_kc_mat(self):\n if use_opt_einsum:\n kcmat1 = contract('j,l, ijkl->ik', self.pv, self.pv, self.etensor.Cijkl)\n else:\n kcmat1 = np.einsum('j,l, ijkl->ik', self.pv, self.pv, self.etensor.Cijkl)\n ###\n # kcmat2 = np.dot(self.pv, np.dot(self.pv, self.etensor.Cijkl))\n # if not np.allclose(kcmat1, kcmat2): raise ValueError('Error Christoffel Matrix')\n ###\n self.kcmat=kcmat1\n return",
"def covarMatrix(x):\n return np.matrix(x - np.mean(x, axis=0)[np.newaxis, :]).T * np.matrix(x - np.mean(x, axis=0)[np.newaxis, :])",
"def _get_conf_mat(self):\n conf_clean, conf_cat = read_confounds(self.confounds)\n conf_mat = pd.get_dummies(self.data[conf_clean], columns=conf_cat, \n drop_first=True)\n return conf_mat.to_numpy()",
"def EC_matrix(self):\n Cmat = np.zeros((2, 2))\n CJ1 = 1. / (2 * self.ECJ1) # capacitances in units where e is set to 1\n CJ2 = 1. / (2 * self.ECJ2)\n CJ3 = 1. / (2 * self.ECJ3)\n Cg1 = 1. / (2 * self.ECg1)\n Cg2 = 1. / (2 * self.ECg2)\n\n Cmat[0, 0] = CJ1 + CJ3 + Cg1\n Cmat[1, 1] = CJ2 + CJ3 + Cg2\n Cmat[0, 1] = -CJ3\n Cmat[1, 0] = -CJ3\n\n return np.linalg.inv(Cmat) / 2.",
"def get_inner_matrix(self):\n return self.matrix",
"def get_similarity_matrix(self, c, q):\n c_len, q_len = c.size(1), q.size(1)\n c = F.dropout(c, self.drop_prob, self.training) # (bs, c_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training) # (bs, q_len, hid_size)\n # Shapes: (batch_size, c_len, q_len)\n s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, c_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s",
"def correlation_matrix(trains):\r\n# -----------------------------------------------------------------------------\r\n result = [[0 for i in xrange(len(trains))] for j in xrange(len(trains))]\r\n for i in xrange(len(trains)):\r\n print i\r\n for j in xrange(len(trains)):\r\n if i>=j:\r\n corr = sum(np.array(trains[i])*np.array(trains[j]))*2.0/(sum(np.array(trains[i]))+sum(np.array(trains[j])))\r\n result[i][j] = corr\r\n result[j][i] = corr\r\n return result",
"def get_similarity_matrix(self, c, q):\n p_len, q_len = c.size(1), q.size(1)\n # (bs, p_len, hid_size)\n c = F.dropout(c, self.drop_prob, self.training)\n # (bs, q_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training)\n\n # Shapes: (batch_size, p_len, q_len)\n s0 = torch.matmul(c, self.p_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, p_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a summary of the SW detection, optionally grouped across channels and/or stage.
|
def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc="mean", sort=True):
return super().summary(
event_type="sw",
grp_chan=grp_chan,
grp_stage=grp_stage,
aggfunc=aggfunc,
sort=sort,
mask=mask,
)
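The slow-wave variant mirrors the spindle summary above. A hedged sketch, assuming the method lives on the object returned by yasa.sw_detect and that the signal below is placeholder noise:

import numpy as np
import yasa

sf = 100.0
eeg = 50 * np.random.randn(1, 120 * int(sf))  # hypothetical single-channel, 2 min recording
sw = yasa.sw_detect(eeg, sf, ch_names=["Cz"])
if sw is not None:  # None when no slow waves pass the detection thresholds
    print(sw.summary(grp_chan=True, aggfunc="median"))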
|
[
"def board_summary(self):\n return self._call_summary(GxFpga.GxFpgaGetBoardSummary)",
"def summariseSuiteResult(self, suite):",
"def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc=\"mean\", sort=True):\n return super().summary(\n event_type=\"spindles\",\n grp_chan=grp_chan,\n grp_stage=grp_stage,\n aggfunc=aggfunc,\n sort=sort,\n mask=mask,\n )",
"def summaries(self):\n with tf.name_scope('summaries'), tf.device('/cpu:0'):\n # Define summaries\n tml = tf.summary.scalar(name='training_loss', tensor=self.loss_avg)\n vml = tf.summary.scalar(name='validation_loss', tensor=self.loss_avg)\n lreg = tf.summary.scalar(name='l2_regularized_loss', tensor=self.loss_regularized)\n lr = tf.summary.scalar(name='learning_rate', tensor=self.lr)\n train_list = [tml, lreg, lr]\n valid_list = [vml]\n train_summaries = tf.summary.merge(\n self.encoder_a.summary_list + self.encoder_b.summary_list + train_list, name='train_summaries')\n valid_summaries = tf.summary.merge(valid_list, name='valid_summaries')\n return train_summaries, valid_summaries",
"def Summarize(self):\n top = Toplevel()\n top.title('SPEX::PREVIEW')\n top.geometry(\"400x300\")\n frameSummarize = LabelFrame(top, relief=RAISED, borderwidth=2)\n frameSummarize.pack(side=TOP, expand=True)\n\n # textSummarize = ['Spectrum or Image File Summary: ', 'Data Type: ', 'Time Bins:', 'Time range:', '#Energy Bins: ',\n # 'Area: ', 'Detectors Used: ', 'Response Info: ']\n # for each section call the parameters from header and data\n txt = [\"\\n\\n\\nSpectrum or Image File Summary\",\n \"\\nData Type: \", self.summarizeData[2], \n \"\\nFile name: \", self.name,\n \"\\n#Time Bins: \", self.time_len, \"Time range: \", self.timeData[0], 'to', self.timeData[1],\n \"\\n#Energy Bins: \", len(self.summarizeData[4]),\n \"Energy range: \", self.summarizeData[5], 'to', self.summarizeData[6],\n \"\\nArea: \", self.summarizeData[0],\n \"\\nDetectors Used: \", self.summarizeData[1],\n \"\\nResponse Info: \", self.name]\n list = Text(frameSummarize)\n list.insert(END, txt)\n list.pack()",
"def detector_stage(self):\n if self.is_detector_stage:\n return\n log.debug(\"Staging detector: removing hardware gains.\")\n self.channels.load_temporary_hardware_gains()\n int_nf.detector_stage(\n frame_data=self.frames.data,\n frame_valid=self.frames.valid,\n channel_indices=np.arange(self.channels.size),\n channel_hardware_gain=self.channels.data.hardware_gain)\n self.is_detector_stage = True",
"def driver_summary(self):\n buflen = 256\n buf = ctypes.create_string_buffer(buflen)\n ver = ctypes.c_uint32()\n self._call(GxFpga.GxFpgaGetDriverSummary, buf, buflen, ctypes.byref(ver))\n ver = ver.value\n\n return (buf.value, ver >> 16, ver & 0xffff)",
"def sense(self):\n\n out = {}\n\n # retrieve depth data\n result, resolution, data = vrep.simxGetVisionSensorDepthBuffer(self._clientID,\n self.sensors_handles['kinect_depth'],\n self._operation_mode)\n if result != vrep.simx_return_ok: # checking the reading result.\n exit(result)\n\n # get clean depth data\n out['depth'] = self.get_depth(data) # appending the distance depth.\n\n # retrieve vision sensor image\n result_vision, resolution, image = vrep.simxGetVisionSensorImage(self._clientID,\n self.sensors_handles['kinect_rgb'],\n 0,\n vrep.simx_opmode_blocking)\n # retrieve vision sensor filtered image (blob)\n result_blob, t0, t1 = vrep.simxReadVisionSensor(self._clientID,\n self.sensors_handles['kinect_rgb'],\n vrep.simx_opmode_blocking)\n\n # extract blob data\n out['vision'] = self.get_vision(resolution, image, t1)\n\n # get load status\n out['load'] = self._load\n\n self._term.write(\"sensed: {}\".format(out))\n\n return out",
"def summariseSuiteResult(self, suite):\n try:\n if self.mode not in [\"FAIL-SUMMARY\"]:\n return\n result = suite.result\n itemType = \"suite\"\n lines, textLen, dotLen = self.formatAnnouncement(suite.summary)\n text = \"\\n\".join(lines)\n writer = sNormal.write\n writer(\"%s%s%12s\\n\" % (text, \".\" * dotLen, result.state))\n finally:\n self.level += 1",
"def summarize(self, tiling=None):\n\t\t\t\ttmp = u\"Analysis Summary ================================\\n\"\n\t\t\t\ttmp = tmp + u\" Project: %s\\n\" % self._project\n\t\t\t\ttmp = tmp + u\" Sample: %s\\n\" % self._sample\n\t\t\t\ttmp = tmp + u\" Analysis: %s\\n\" % self._analysis\n\t\t\t\ttmp = tmp + u\" Instrument: %s's TESCAN MIRA3\\n\" % (SITE, )\n\t\t\t\ttmp = tmp + u\" Operator: %s\\n\" % self._analyst\n\t\t\t\ttmp = tmp + u\"Search and Measure ==============================\\n\"\n\t\t\t\ttmp = tmp + u\" FOV: %g mm × %g mm FOV\\n\" % (self._fov, self._fov)\n\t\t\t\tif self._overlap>1.0:\n\t\t\t\t\ttmp = tmp + u\" Border: %0.3g%% un-analyzed border region\\n\" % ( (self._overlap-1.0)*100.0, )\n\t\t\t\telif self._overlap<1.0:\n\t\t\t\t\ttmp = tmp + u\" Overlap: %0.3g%% overlap of adjacent fields\\n\" % ( (1.0 - self._overlap)*100.0, )\n\t\t\t\ttmp = tmp + u\" Detector: %s\\n\" % (self.getDetector())\n\t\t\t\tsed, bsed = _ts.dtGetGainBlack(findEDet(sedName).getIndex()), _ts.dtGetGainBlack(self._imgDet.getIndex())\n\t\t\t\ttmp = tmp + u\" BSED: Contrast %0.2f Brightness: %0.2f\\n\" % (bsed[0], bsed[1])\n\t\t\t\ttmp = tmp + u\" SED: Contrast %0.2f Brightness: %0.2f\\n\" % (sed[0], sed[1])\n\t\t\t\ttmp = tmp + u\" Search: %d to %d at %d pixels × %d pixels at %g µs/pixel\\n\" % (self._searchLow, self._searchHigh, self.searchDimension(), self.searchDimension(), self._searchDwell / 1000)\n\t\t\t\ttmp = tmp + u\" Measure: %d to %d at %d µs/pixel\\n\" % (self._measureLow, self._measureHigh, self._measureDwell / 1000)\n\t\t\t\ttmp = tmp + u\" Step: %d\\n\" % self._measureStep\n\t\t\t\ttmp = tmp + u\" Morphology: %s\\n\" % self._morphologyCrit\n\t\t\t\ttmp = tmp + u\" Max Particles: %d\\n\" % self._maxPart\n\t\t\t\ttmp = tmp + u\" Max per field: %d\\n\" % self._maxPartPerField\n\t\t\t\ttmp = tmp + u\" Beam Int: %0.2f\\n\" % _ts.getPCContinual()\n\t\t\t\ttmp = tmp + u\" Spot: %0.2f nm\\n\" % _ts.getSpotSize()\n\t\t\t\tif self._probeCurrent:\n\t\t\t\t\ttmp = tmp + u\" Faraday: %0.2f nA\\n\" % self._probeCurrent\n\t\t\t\telse:\n\t\t\t\t\ttmp = tmp + u\" IAbs: %0.2f nA\\n\" % (_ts.getIAbsorbed() / 1000.0)\n\t\t\t\ttmp = tmp + u\" Beam E: %0.2f keV\\n\" % (_ts.hvGetVoltage() / 1000.0)\n\t\t\t\ttmp = tmp + u\" WD: %0.3f mm\\n\" % _ts.getWD()\n\t\t\t\ttmp = tmp + u\"Vacuum ==========================================\\n\"\n\t\t\t\ttmp = tmp + u\" VP Mode: %s\\n\" % _ts.vacGetVPMode()\n\t\t\t\ttmp = tmp + u\" Chamber: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(0))\n\t\t\t\ttmp = tmp + u\" Column: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(1))\n\t\t\t\tif SITE!=WARRENDALE:\n\t\t\t\t\ttmp = tmp + u\"\t\t\t Gun: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(2))\n\t\t\t\ttmp = tmp + u\"Images ==========================================\\n\"\n\t\t\t\tif self._collectImages:\n\t\t\t\t\ttmp = tmp + u\" Field: Collect at %d pixels × %d pixels\\n\" % (self._imgDim, self._imgDim)\n\t\t\t\telse:\n\t\t\t\t\ttmp = tmp + u\" Field: Don't collect\\n\"\n\t\t\t\ttmp = tmp + u\" Particle: Evaluate %s at %d pixels × %d pixels\\n\" % (self._collectPartImages.func_name, self._pImgDim, self._pImgDim)\n\t\t\t\tif self._collectEDS:\n\t\t\t\t\ttmp = tmp + u\"EDS Configuration ==============================\\n\"\n\t\t\t\t\ttmp = tmp + u\" EDS: %g s Real time\\n\" % self._edsRealTime\n\t\t\t\t\ttmp = tmp + u\" Mode: %s\\n\" % self._EDSMode\n\t\t\t\t\ttmp = tmp + u\" Vectors: %s\\n\" % self._vecs\n\t\t\t\t\ttmp = tmp + u\" Elements: %s\\n\" % (\", \".join(\"%s\" % v.toAbbrev() for v in 
self._vecs.getElements()))\n\t\t\t\t\ttmp = tmp + u\" Rules: %s\\n\" % self._rules\n\t\t\t\tif self._collectSI:\n\t\t\t\t\ttmp = tmp + u\"SI Collection ==================================\\n\"\n\t\t\t\t\ttmp = tmp + u\" When: Based on rule evaluation\\n\"\n\t\t\t\t\ttmp = tmp + u\" Dimensions: %d\\n\" % self._SIDim\n\t\t\t\t\ttmp = tmp + u\" Dwell: %d\\n\" % self._SIDwell\n\t\t\t\tif tiling:\n\t\t\t\t\ttmp = tmp + u\"Tiling ========================================\\n\"\n\t\t\t\t\ttmp = tmp + u\" Description: %s\\n\" % tiling\n\t\t\t\t\ttmp = tmp + u\" Tiles: %d of %g mm × %g mm\\n\" % (tiling.size(), tiling.getTileDimension()[0], tiling.getTileDimension()[0])\n\t\t\t\t\ttmp = tmp + u\" Area: %g\\n\" % (tiling.getArea(), )\n\t\t\t\tprint tmp\n\t\t\t\tfos = jio.FileOutputStream(jio.File(self._path, \"configuration.txt\"))\n\t\t\t\ttry:\n\t\t\t\t\tosw = jio.OutputStreamWriter(fos, \"UTF-8\")\n\t\t\t\t\tosw.write(tmp)\n\t\t\t\t\tosw.flush()\n\t\t\t\tfinally:\n\t\t\t\t\tfos.close()",
"def summary(self):\n buf = create_string_buffer(b'', size=256)\n self._call(GxFpga.GxFpgaGetBoardSummary, self._handle, buf, 256)\n \n return buf.value",
"def add_pruning_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n for mask, threshold in zip(masks, thresholds):\n if not self._exists_in_do_not_prune_list(mask.name):\n tf.summary.scalar(mask.op.name + '/sparsity',\n tf.nn.zero_fraction(mask))\n tf.summary.scalar(threshold.op.name + '/threshold', threshold)",
"def _summarize_expt(self):\n\n print('\\nCURRENT EXPERIMENT:\\n{line}'.format(line='-' * 50))\n print('Training percentage : {:.2}'.format(self.train_perc))\n print('Number of CV repetitions : {}'.format(self.num_rep_cv))\n print('Number of processors : {}'.format(self.num_procs))\n print('Dim reduction method : {}'.format(self.dim_red_method))\n print('Dim reduction size : {}'.format(self.reduced_dim))\n print('Predictive model chosen : {}'.format(self.pred_model))\n print('Grid search level : {}\\n'.format(self.grid_search_level))\n\n if len(self.covariates) > 0:\n print('Covarites selected : {}'.format(', '.join(self.covariates)))\n print('Deconfoudning method : {}\\n'.format(self.deconfounder))\n\n if self._workflow_type == 'classify':\n self._target_sizes = list(self.datasets.target_sizes.values())\n self._chance_accuracy = chance_accuracy(self._target_sizes, 'balanced')\n print('Estimated chance accuracy : {:.3f}\\n'\n ''.format(self._chance_accuracy))",
"def show_summary():\n for group_name, results in test_results:\n num_total = len(results)\n num_passed = sum(1 for x in results if x[0])\n num_failed = num_total - num_passed\n print(\"[STAT] Results for '%s' : %d%% [%d passed, %d failed] / %d total\" %\n (\n group_name,\n num_passed / num_total * 100,\n num_passed,\n num_failed,\n num_total\n )\n )",
"def collect(self) -> SparsifiedModelStatistics:\n weights_descriptions = self._collect_weights_descriptions()\n sparsity_level_for_model = _calculate_sparsity_level_for_model(weights_descriptions)\n\n total_params = sum(w.num_params for w in weights_descriptions if w.is_sparse)\n total_num_zero = sum(w.num_zero for w in weights_descriptions if w.is_sparse)\n sparsity_level_for_sparse_layers = total_num_zero / total_params\n\n sparse_layers_summary = []\n for w in weights_descriptions:\n if not w.is_sparse:\n continue\n\n weight_percentage = 100 * (w.num_params / total_params)\n sparse_layers_summary.append(SparsifiedLayerSummary(w.name, w.shape, w.sparsity_level, weight_percentage))\n\n sparse_model_stats = SparsifiedModelStatistics(\n sparsity_level_for_model, sparsity_level_for_sparse_layers, sparse_layers_summary\n )\n\n return sparse_model_stats",
"def _update_stage(self, scr, stage):\n msg = []\n for state, status in STATUS.items()[:-1]:\n if stage.status[state]:\n length = len(stage[state])\n if state == Builder.FAILED and not self._indirect:\n length -= len([i for i in stage[state]\n if \"failed\" not in i.flags])\n if not length:\n continue\n msg.append(\"%i %s\" % (length, status))\n\n if msg:\n stage_name = stage.stage.name\n scr.addstr(\n self._offset, 0, \"%s:%s%s\" %\n (stage_name[:8],\n \" \" * max(1, 9 - len(stage_name)), \", \".join(msg)))\n self._offset += 1",
"def get_all_case_stats(self, key=\"training\", transform_list=None):\n result: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}\n result_bycase: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}\n if self.device.type == \"cpu\":\n nprocs = 1\n logger.info(\"Using CPU for data analyzing!\")\n else:\n nprocs = torch.cuda.device_count()\n logger.info(f\"Found {nprocs} GPUs for data analyzing!\")\n if nprocs > 1:\n tmp_ctx = get_context(\"forkserver\")\n with tmp_ctx.Manager() as manager:\n manager_list = manager.list()\n processes = []\n for rank in range(nprocs):\n p = tmp_ctx.Process(\n target=self._get_all_case_stats, args=(rank, nprocs, manager_list, key, transform_list)\n )\n processes.append(p)\n for p in processes:\n p.start()\n for p in processes:\n p.join()\n # merge DataStatsKeys.BY_CASE\n for _ in manager_list:\n result_bycase[DataStatsKeys.BY_CASE].extend(_[DataStatsKeys.BY_CASE])\n else:\n result_bycase = self._get_all_case_stats(0, 1, None, key, transform_list)\n\n summarizer = SegSummarizer(\n self.image_key,\n self.label_key,\n average=self.average,\n do_ccp=self.do_ccp,\n hist_bins=self.hist_bins,\n hist_range=self.hist_range,\n histogram_only=self.histogram_only,\n )\n n_cases = len(result_bycase[DataStatsKeys.BY_CASE])\n result[DataStatsKeys.SUMMARY] = summarizer.summarize(cast(list, result_bycase[DataStatsKeys.BY_CASE]))\n result[DataStatsKeys.SUMMARY][\"n_cases\"] = n_cases\n result_bycase[DataStatsKeys.SUMMARY] = result[DataStatsKeys.SUMMARY]\n if not self._check_data_uniformity([ImageStatsKeys.SPACING], result):\n logger.info(\"Data spacing is not completely uniform. MONAI transforms may provide unexpected result\")\n if self.output_path:\n logger.info(f\"Writing data stats to {self.output_path}.\")\n ConfigParser.export_config_file(\n result, self.output_path, fmt=self.fmt, default_flow_style=None, sort_keys=False\n )\n by_case_path = self.output_path.replace(f\".{self.fmt}\", f\"_by_case.{self.fmt}\")\n if by_case_path == self.output_path: # self.output_path not ended with self.fmt?\n by_case_path += f\".by_case.{self.fmt}\"\n logger.info(f\"Writing by-case data stats to {by_case_path}, this may take a while.\")\n ConfigParser.export_config_file(\n result_bycase, by_case_path, fmt=self.fmt, default_flow_style=None, sort_keys=False\n )\n # release memory\n if self.device.type == \"cuda\":\n # release unreferenced tensors to mitigate OOM\n # limitation: https://github.com/pytorch/pytorch/issues/12873#issuecomment-482916237\n torch.cuda.empty_cache()\n result[DataStatsKeys.BY_CASE] = result_bycase[DataStatsKeys.BY_CASE]\n return result",
"def sense(img, k=1000, basis=\"wvt\", wvt_level=4, alpha=None):\n print \"Image size:\", img.shape\n img_f = img.flatten()\n print \"Build sensing matrix\"\n\n A = np.random.normal(0, 1, len(img_f)*k).astype(np.float32).\\\n reshape(k, img.shape[0], img.shape[1])\n\n print \"Measurement\"\n b = np.dot(A.reshape(k, len(img_f)), img_f)\n\n if basis == \"wvt\":\n print \"Wavelets\"\n trans_A = [utils.dwt2(A[i].reshape(img.shape), level=wvt_level).\n astype(np.float16).flatten() for i in range(k)]\n elif basis == \"dct\":\n print \"DCT\"\n trans_A = [utils.dct2(A[i].reshape(img.shape)).\n astype(np.float16).flatten() for i in range(k)]\n else:\n raise Exception(\"Unknown basis\")\n\n A = None\n\n if alpha:\n lasso = lm.Lasso(alpha=alpha, max_iter=100000, normalize=True)\n print \"Fit\"\n lasso.fit(trans_A, b)\n else:\n lasso_cv = lm.LassoCV(n_jobs=cpu_count(), max_iter=100000,\n normalize=True)\n print \"Fit\"\n lasso_cv.fit(trans_A, b)\n print \"Alpha: %.6f\" % lasso_cv.alpha_\n lasso = lm.Lasso(alpha=lasso_cv.alpha_, max_iter=100000,\n normalize=True)\n print \"Fit\"\n lasso.fit(trans_A, b)\n\n if basis == \"wvt\":\n return utils.idwt2(lasso.coef_.reshape(img.shape), level=wvt_level)\n elif basis == \"dct\":\n return utils.idct2(lasso.coef_.reshape(img.shape))\n else:\n raise Exception(\"Unknown basis\")",
"def summarize(self):\n\n if not self.from_torch_called_ and not self.from_tensorflow_called_ and self.layers_ < 2:\n if not self.from_torch_called_ and not self.from_tensorflow_called_:\n raise ValueError('This model has not yet been created. Create the model first by calling `from_pytorch()` or calling `from_tensorflow()`')\n else:\n raise ValueError('The model has not been built yet or the model is not supported.\\n Check the docs for further information')\n \n title = \"Neural Network Architecture\"\n hline = \"+\"+\"-\"*69+\"+\"\n\n print(hline)\n print(\"|\"+title.center(69)+\"|\")\n print(hline)\n print(\"|\"+\"Layer Name\".center(28)+\"|\"+\"Layer Type\".center(24)+\"|\"+\"Layer Units\".center(15)+\"|\")\n print(hline)\n for i in range(self.layers_):\n col1 = self.layer_names_[i].center(28)\n col2 = self.layer_types_[i].capitalize().center(24)\n col3 = str(self.layer_units_[i]).center(15)\n print(\"|\"+col1+\"|\"+col2+\"|\"+col3+\"|\")\n print(hline)\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the (scaled) coincidence matrix.
|
def get_coincidence_matrix(self, scaled=True):
return super().get_coincidence_matrix(scaled=scaled)
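The same coincidence-matrix accessor appears in this row as well; as a complementary sketch, one common follow-up is to plot the matrix as a heatmap (seaborn and matplotlib are assumed to be available, and sp is again a hypothetical multi-channel detection-results object):

import matplotlib.pyplot as plt
import seaborn as sns

coinc = sp.get_coincidence_matrix(scaled=True)
sns.heatmap(coinc, annot=True, square=True, cmap="viridis")
plt.title("Scaled event coincidence between channels")
plt.show()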
|
[
"def influence_matrix(self) -> np.ndarray:",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def generate_cnk_matrix(self):\r\n total = self.rator_number\r\n cnk_matrix = np.zeros((total - 1, total))\r\n\r\n for column in range(total):\r\n cnk_matrix[:, column] = np.concatenate((np.where(self.combination_list[:, 0] == column)[0],\r\n np.where(self.combination_list[:, 1] == column)[0]))\r\n\r\n return cnk_matrix.astype(int)",
"def cofactor_matrix(self):\n return Matrix(self.row_n, self.col_n, [self.cofactor(i, j) for i in range(1, self.row_n + 1) for j in range(1, self.col_n + 1)])",
"def _matrix_(self):\n return self.to_matrix()",
"def incidence_matrix(labels):\n Npts = len(labels)\n incidence_matrix = np.zeros((Npts,Npts))\n for i in range(Npts):\n for j in range(Npts):\n if labels[i] == labels[j]:\n incidence_matrix[i][j] = 1\n else:\n incidence_matrix[i][j] = 0\n return(incidence_matrix)",
"def contact_matrix(self):\t\t\n\t\tJp = self.J.view(self.L, self.L)\n\t\tS_FN = torch.sqrt(Jp*Jp)\n\n\t\treturn S_FN.data",
"def get_conformers_distmatrix (self) :\n ncomformers = len(self.geometries)\n for i,(en,b) in enumerate(zip(self.energies,self.bconsts)) :\n for j in range(i+1,len(self.geometries)) :\n dist = self.get_overlap_with_conformer(j,en,b)\n dist_matrix[i,j] = dist\n # Symmetrize\n dist_matrix = dist_matrix + dist_matrix.transpose()\n return dist_matrix",
"def get_pairwise_chi_matrix(self):\n\n\t\treturn self._pairwise_chi_matrix",
"def get_full_matrix(correlations):\n n = correlations.shape[1]\n matrix = np.zeros((n,n), dtype=np.uint8)\n for i in range(n):\n for j in range(correlations.shape[0]):\n if correlations[j,i] == 1:\n col = i+j+1\n if col < n and col >= 0:\n matrix[i,col] = 1\n matrix[col,i] = 1\n return matrix",
"def calc_overlap_matrix(self, points=None):",
"def get_kc_mat(self):\n if use_opt_einsum:\n kcmat1 = contract('j,l, ijkl->ik', self.pv, self.pv, self.etensor.Cijkl)\n else:\n kcmat1 = np.einsum('j,l, ijkl->ik', self.pv, self.pv, self.etensor.Cijkl)\n ###\n # kcmat2 = np.dot(self.pv, np.dot(self.pv, self.etensor.Cijkl))\n # if not np.allclose(kcmat1, kcmat2): raise ValueError('Error Christoffel Matrix')\n ###\n self.kcmat=kcmat1\n return",
"def covarMatrix(x):\n return np.matrix(x - np.mean(x, axis=0)[np.newaxis, :]).T * np.matrix(x - np.mean(x, axis=0)[np.newaxis, :])",
"def _get_conf_mat(self):\n conf_clean, conf_cat = read_confounds(self.confounds)\n conf_mat = pd.get_dummies(self.data[conf_clean], columns=conf_cat, \n drop_first=True)\n return conf_mat.to_numpy()",
"def EC_matrix(self):\n Cmat = np.zeros((2, 2))\n CJ1 = 1. / (2 * self.ECJ1) # capacitances in units where e is set to 1\n CJ2 = 1. / (2 * self.ECJ2)\n CJ3 = 1. / (2 * self.ECJ3)\n Cg1 = 1. / (2 * self.ECg1)\n Cg2 = 1. / (2 * self.ECg2)\n\n Cmat[0, 0] = CJ1 + CJ3 + Cg1\n Cmat[1, 1] = CJ2 + CJ3 + Cg2\n Cmat[0, 1] = -CJ3\n Cmat[1, 0] = -CJ3\n\n return np.linalg.inv(Cmat) / 2.",
"def get_inner_matrix(self):\n return self.matrix",
"def get_similarity_matrix(self, c, q):\n c_len, q_len = c.size(1), q.size(1)\n c = F.dropout(c, self.drop_prob, self.training) # (bs, c_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training) # (bs, q_len, hid_size)\n # Shapes: (batch_size, c_len, q_len)\n s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, c_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s",
"def correlation_matrix(trains):\r\n# -----------------------------------------------------------------------------\r\n result = [[0 for i in xrange(len(trains))] for j in xrange(len(trains))]\r\n for i in xrange(len(trains)):\r\n print i\r\n for j in xrange(len(trains)):\r\n if i>=j:\r\n corr = sum(np.array(trains[i])*np.array(trains[j]))*2.0/(sum(np.array(trains[i]))+sum(np.array(trains[j])))\r\n result[i][j] = corr\r\n result[j][i] = corr\r\n return result",
"def get_similarity_matrix(self, c, q):\n p_len, q_len = c.size(1), q.size(1)\n # (bs, p_len, hid_size)\n c = F.dropout(c, self.drop_prob, self.training)\n # (bs, q_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training)\n\n # Shapes: (batch_size, p_len, q_len)\n s0 = torch.matmul(c, self.p_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, p_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a summary of the REM detection, optionally grouped across stage.
|
def summary(self, grp_stage=False, mask=None, aggfunc="mean", sort=True):
# ``grp_chan`` is always False for REM detection because the
# REMs are always detected on a combination of LOC and ROC.
return super().summary(
event_type="rem",
grp_chan=False,
grp_stage=grp_stage,
aggfunc=aggfunc,
sort=sort,
mask=mask,
)
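A hedged usage sketch, assuming this is the summary method of the object returned by yasa.rem_detect; because REMs are detected jointly on the two EOG channels, only stage-level grouping is exposed. The traces below are placeholder noise:

import numpy as np
import yasa

sf = 100.0
loc = 80 * np.random.randn(5 * 60 * int(sf))  # hypothetical left-EOG (LOC) trace
roc = 80 * np.random.randn(5 * 60 * int(sf))  # hypothetical right-EOG (ROC) trace
rem = yasa.rem_detect(loc, roc, sf)
if rem is not None:  # None when no rapid eye movements are detected
    print(rem.summary())  # one row per detected REM
    # rem.summary(grp_stage=True) aggregates per sleep stage, but only makes
    # sense when a hypnogram was passed to the detection call.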
|
[
"def summarise(self) -> None:\n with open(self.parsed_replay_file, \"r\") as f:\n self.parsed_replay = [line for line in f]\n\n # Some parsing stuff here\n\n self.match_summary = {\n \"match_id\": 123345,\n \"match_date\": \"2019-07-07\", #If we can get it, otherwise upload timestamp from the meta file\n \"radiant\": \"Radiant team name\",\n \"dire\": \"Dire team name\",\n \"radiant_won\": True,\n \"radiant_kills\": 22,\n \"dire_kills\": 3,\n \"duration\": 3600, # Time in seconds,\n \"first_blood_time\": 120, # Time in seconds\n \"first_blood_hero\": \"Hero name\",\n \"picks\": {\n \"radiant\": {\n \"pick_1\": \"Hero name\",\n \"pick_2\": \"Hero name\"\n # etc\n },\n \"dire\": {\n \"pick_1\": \"Hero name\",\n \"pick_2\": \"Hero name\"\n }\n },\n \"bans\": {\n \"radiant\": {\n \"ban_1\": \"Hero name\",\n \"ban_2\": \"Hero name\"\n },\n \"dire\": {\n \"ban_1\": \"Hero name\",\n \"ban_2\": \"Hero name\"\n }\n }\n }\n\n # A list of player summaries\n self.player_summaries = [\n {\n \"match_id\": 123345,\n \"hero\": \"Hero name\",\n \"player\": \"Player name\",\n \"team\": \"Team name\",\n \"side\": \"Radiant\",\n \"won\": True,\n \"kills\": 30,\n \"deaths\": 5,\n \"assists\": 6,\n \"net_worth\": 31493, # At end of game\n \"level\": 25,\n \"gpm\": 800,\n \"xpm\": 400,\n \"last_hits\": 200,\n \"denies\": 30,\n \"hero_damage\": 10000,\n \"building_damage\": 20000,\n \"damage_taken\": 5000,\n \"biggest_kill_streak\": 4,\n \"bounty_runes\": 4,\n \"wards_placed\": 5,\n \"items\": { # Not sure on this data structure\n \"slot_1\": {\n \"name\": \"BKB\",\n \"time\": 900 # Game time item bought in seconds\n } # repeat for other item slots and backpack\n },\n \"timings\": {\n \"gold\": {\n 0: 600,\n 1: 800\n # per minute net worth total\n },\n \"xp\": {\n 0: 0,\n 1: 150\n # per minute xp total\n }\n }\n\n }\n ]",
"def _summarize_expt(self):\n\n print('\\nCURRENT EXPERIMENT:\\n{line}'.format(line='-' * 50))\n print('Training percentage : {:.2}'.format(self.train_perc))\n print('Number of CV repetitions : {}'.format(self.num_rep_cv))\n print('Number of processors : {}'.format(self.num_procs))\n print('Dim reduction method : {}'.format(self.dim_red_method))\n print('Dim reduction size : {}'.format(self.reduced_dim))\n print('Predictive model chosen : {}'.format(self.pred_model))\n print('Grid search level : {}\\n'.format(self.grid_search_level))\n\n if len(self.covariates) > 0:\n print('Covarites selected : {}'.format(', '.join(self.covariates)))\n print('Deconfoudning method : {}\\n'.format(self.deconfounder))\n\n if self._workflow_type == 'classify':\n self._target_sizes = list(self.datasets.target_sizes.values())\n self._chance_accuracy = chance_accuracy(self._target_sizes, 'balanced')\n print('Estimated chance accuracy : {:.3f}\\n'\n ''.format(self._chance_accuracy))",
"def summariseSuiteResult(self, suite):",
"def add_pruning_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n for mask, threshold in zip(masks, thresholds):\n if not self._exists_in_do_not_prune_list(mask.name):\n tf.summary.scalar(mask.op.name + '/sparsity',\n tf.nn.zero_fraction(mask))\n tf.summary.scalar(threshold.op.name + '/threshold', threshold)",
"def summariseResult(self, test):",
"def Summarize(self):\n top = Toplevel()\n top.title('SPEX::PREVIEW')\n top.geometry(\"400x300\")\n frameSummarize = LabelFrame(top, relief=RAISED, borderwidth=2)\n frameSummarize.pack(side=TOP, expand=True)\n\n # textSummarize = ['Spectrum or Image File Summary: ', 'Data Type: ', 'Time Bins:', 'Time range:', '#Energy Bins: ',\n # 'Area: ', 'Detectors Used: ', 'Response Info: ']\n # for each section call the parameters from header and data\n txt = [\"\\n\\n\\nSpectrum or Image File Summary\",\n \"\\nData Type: \", self.summarizeData[2], \n \"\\nFile name: \", self.name,\n \"\\n#Time Bins: \", self.time_len, \"Time range: \", self.timeData[0], 'to', self.timeData[1],\n \"\\n#Energy Bins: \", len(self.summarizeData[4]),\n \"Energy range: \", self.summarizeData[5], 'to', self.summarizeData[6],\n \"\\nArea: \", self.summarizeData[0],\n \"\\nDetectors Used: \", self.summarizeData[1],\n \"\\nResponse Info: \", self.name]\n list = Text(frameSummarize)\n list.insert(END, txt)\n list.pack()",
"def summarize(self, tiling=None):\n\t\t\t\ttmp = u\"Analysis Summary ================================\\n\"\n\t\t\t\ttmp = tmp + u\" Project: %s\\n\" % self._project\n\t\t\t\ttmp = tmp + u\" Sample: %s\\n\" % self._sample\n\t\t\t\ttmp = tmp + u\" Analysis: %s\\n\" % self._analysis\n\t\t\t\ttmp = tmp + u\" Instrument: %s's TESCAN MIRA3\\n\" % (SITE, )\n\t\t\t\ttmp = tmp + u\" Operator: %s\\n\" % self._analyst\n\t\t\t\ttmp = tmp + u\"Search and Measure ==============================\\n\"\n\t\t\t\ttmp = tmp + u\" FOV: %g mm × %g mm FOV\\n\" % (self._fov, self._fov)\n\t\t\t\tif self._overlap>1.0:\n\t\t\t\t\ttmp = tmp + u\" Border: %0.3g%% un-analyzed border region\\n\" % ( (self._overlap-1.0)*100.0, )\n\t\t\t\telif self._overlap<1.0:\n\t\t\t\t\ttmp = tmp + u\" Overlap: %0.3g%% overlap of adjacent fields\\n\" % ( (1.0 - self._overlap)*100.0, )\n\t\t\t\ttmp = tmp + u\" Detector: %s\\n\" % (self.getDetector())\n\t\t\t\tsed, bsed = _ts.dtGetGainBlack(findEDet(sedName).getIndex()), _ts.dtGetGainBlack(self._imgDet.getIndex())\n\t\t\t\ttmp = tmp + u\" BSED: Contrast %0.2f Brightness: %0.2f\\n\" % (bsed[0], bsed[1])\n\t\t\t\ttmp = tmp + u\" SED: Contrast %0.2f Brightness: %0.2f\\n\" % (sed[0], sed[1])\n\t\t\t\ttmp = tmp + u\" Search: %d to %d at %d pixels × %d pixels at %g µs/pixel\\n\" % (self._searchLow, self._searchHigh, self.searchDimension(), self.searchDimension(), self._searchDwell / 1000)\n\t\t\t\ttmp = tmp + u\" Measure: %d to %d at %d µs/pixel\\n\" % (self._measureLow, self._measureHigh, self._measureDwell / 1000)\n\t\t\t\ttmp = tmp + u\" Step: %d\\n\" % self._measureStep\n\t\t\t\ttmp = tmp + u\" Morphology: %s\\n\" % self._morphologyCrit\n\t\t\t\ttmp = tmp + u\" Max Particles: %d\\n\" % self._maxPart\n\t\t\t\ttmp = tmp + u\" Max per field: %d\\n\" % self._maxPartPerField\n\t\t\t\ttmp = tmp + u\" Beam Int: %0.2f\\n\" % _ts.getPCContinual()\n\t\t\t\ttmp = tmp + u\" Spot: %0.2f nm\\n\" % _ts.getSpotSize()\n\t\t\t\tif self._probeCurrent:\n\t\t\t\t\ttmp = tmp + u\" Faraday: %0.2f nA\\n\" % self._probeCurrent\n\t\t\t\telse:\n\t\t\t\t\ttmp = tmp + u\" IAbs: %0.2f nA\\n\" % (_ts.getIAbsorbed() / 1000.0)\n\t\t\t\ttmp = tmp + u\" Beam E: %0.2f keV\\n\" % (_ts.hvGetVoltage() / 1000.0)\n\t\t\t\ttmp = tmp + u\" WD: %0.3f mm\\n\" % _ts.getWD()\n\t\t\t\ttmp = tmp + u\"Vacuum ==========================================\\n\"\n\t\t\t\ttmp = tmp + u\" VP Mode: %s\\n\" % _ts.vacGetVPMode()\n\t\t\t\ttmp = tmp + u\" Chamber: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(0))\n\t\t\t\ttmp = tmp + u\" Column: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(1))\n\t\t\t\tif SITE!=WARRENDALE:\n\t\t\t\t\ttmp = tmp + u\"\t\t\t Gun: %0.5g torr\\n\" % pascalToTorr(_ts.vacGetPressure(2))\n\t\t\t\ttmp = tmp + u\"Images ==========================================\\n\"\n\t\t\t\tif self._collectImages:\n\t\t\t\t\ttmp = tmp + u\" Field: Collect at %d pixels × %d pixels\\n\" % (self._imgDim, self._imgDim)\n\t\t\t\telse:\n\t\t\t\t\ttmp = tmp + u\" Field: Don't collect\\n\"\n\t\t\t\ttmp = tmp + u\" Particle: Evaluate %s at %d pixels × %d pixels\\n\" % (self._collectPartImages.func_name, self._pImgDim, self._pImgDim)\n\t\t\t\tif self._collectEDS:\n\t\t\t\t\ttmp = tmp + u\"EDS Configuration ==============================\\n\"\n\t\t\t\t\ttmp = tmp + u\" EDS: %g s Real time\\n\" % self._edsRealTime\n\t\t\t\t\ttmp = tmp + u\" Mode: %s\\n\" % self._EDSMode\n\t\t\t\t\ttmp = tmp + u\" Vectors: %s\\n\" % self._vecs\n\t\t\t\t\ttmp = tmp + u\" Elements: %s\\n\" % (\", \".join(\"%s\" % v.toAbbrev() for v in 
self._vecs.getElements()))\n\t\t\t\t\ttmp = tmp + u\" Rules: %s\\n\" % self._rules\n\t\t\t\tif self._collectSI:\n\t\t\t\t\ttmp = tmp + u\"SI Collection ==================================\\n\"\n\t\t\t\t\ttmp = tmp + u\" When: Based on rule evaluation\\n\"\n\t\t\t\t\ttmp = tmp + u\" Dimensions: %d\\n\" % self._SIDim\n\t\t\t\t\ttmp = tmp + u\" Dwell: %d\\n\" % self._SIDwell\n\t\t\t\tif tiling:\n\t\t\t\t\ttmp = tmp + u\"Tiling ========================================\\n\"\n\t\t\t\t\ttmp = tmp + u\" Description: %s\\n\" % tiling\n\t\t\t\t\ttmp = tmp + u\" Tiles: %d of %g mm × %g mm\\n\" % (tiling.size(), tiling.getTileDimension()[0], tiling.getTileDimension()[0])\n\t\t\t\t\ttmp = tmp + u\" Area: %g\\n\" % (tiling.getArea(), )\n\t\t\t\tprint tmp\n\t\t\t\tfos = jio.FileOutputStream(jio.File(self._path, \"configuration.txt\"))\n\t\t\t\ttry:\n\t\t\t\t\tosw = jio.OutputStreamWriter(fos, \"UTF-8\")\n\t\t\t\t\tosw.write(tmp)\n\t\t\t\t\tosw.flush()\n\t\t\t\tfinally:\n\t\t\t\t\tfos.close()",
"def summary(self):\n\n failedNumber = 0\n successfulNumber = 0\n\n self.logUtil.log(\"Test Summary\")\n\n for runName in self.summaries:\n\n successfulCases = self.summaries[runName]['successfulCases']\n failedCases = self.summaries[runName]['failedCases']\n\n successfulNumber += len(successfulCases)\n failedNumber += len(failedCases)\n\n self.logUtil.log(\"++++++++++++++++++++++++++++++\")\n self.logUtil.log(\"Test Run:\" + runName)\n self.logUtil.log(\"++++++++++++++++++++++++++++++\")\n\n self.logUtil.log(\"Successful cases (\" + str(len(successfulCases)) + '):', 'success')\n\n for case in successfulCases:\n self.logUtil.log('+ ' + case['id'] + ' (' + str(case['testResult']['actualTime']) + ' ms)')\n\n self.logUtil.log(\"\")\n self.logUtil.log(\"Failed cases (\" + str(len(failedCases)) + '):', \"warning\")\n\n for case in failedCases:\n testResult = case['testResult']\n self.logUtil.log('+ ' + case['id'] + ' (' + str(testResult['actualTime']) + ' ms)')\n\n self.logUtil.log(\"-----------------------------\")\n self.logUtil.log(\"Reason: \")\n\n if not testResult['expectedValueTestResult'] and not (testResult['expectedReturn'] is None or testResult['expectedReturn'] == 'any'):\n self.logUtil.log(\"Value dosn't match: (expect - \" + str(testResult['expectedReturn']) + \" | actual - \" + str(testResult['actualReturn']) + ')', 'warning')\n\n if not testResult['expectedTimeTestResult'] and not (testResult['expectedTime'] is None or testResult['expectedTime'] == 'any'):\n self.logUtil.log(\"Unexpected time consumption (ms): (expect - \" + str(testResult['expectedTime']) + \" | actual - \" + str(testResult['actualTime']) + ')', 'warning')\n\n self.logUtil.log(\"-----------------------------\")\n\n return {\n 'successNumber': successfulNumber,\n 'failedNumber': failedNumber,\n }",
"def summary(self) -> None:\n self.model.summary()",
"def show_summary():\n for group_name, results in test_results:\n num_total = len(results)\n num_passed = sum(1 for x in results if x[0])\n num_failed = num_total - num_passed\n print(\"[STAT] Results for '%s' : %d%% [%d passed, %d failed] / %d total\" %\n (\n group_name,\n num_passed / num_total * 100,\n num_passed,\n num_failed,\n num_total\n )\n )",
"def metadata_count_summary(md):\n # Perform image counts (total, ok images and junk images)\n total_images = md['brand'].value_counts()\n ok_images = md[md.ok]['brand'].value_counts()\n junk_images = md[md.ok == False]['brand'].value_counts()\n\n # Build a dataframe of image counts from the input dataset\n summary = pd.concat([ok_images, junk_images, total_images], axis=1, sort=True)\n summary.columns = ['#OK', '#Junk', 'Total']\n summary.index.name = 'Logo name'\n return summary",
"def summary(self) -> str:",
"def get_summary(master_bricks, mastervol, slavehost, slavevol):\n checkpoints_complete_count = 0\n num_bricks = len(master_bricks)\n num_faulty = 0\n num_active_bricks = 0\n num_passive_bricks = 0\n num_down = 0\n down_nodes = []\n faulty_nodes = []\n\n georep_status = get_georep_status(mastervol, slavehost, slavevol)\n for b in master_bricks:\n if georep_status.get(b, None) is not None:\n if georep_status.get(b)[\"status\"] == \"Active\":\n num_active_bricks += 1\n\n if georep_status.get(b)[\"status\"] == \"Passive\":\n num_passive_bricks += 1\n\n if georep_status.get(b)[\"checkpoint_status\"] == \"Yes\":\n checkpoints_complete_count += 1\n\n if georep_status.get(b)[\"status\"].lower() == \"faulty\":\n num_faulty += 1\n faulty_nodes.append(b)\n else:\n # If a Master Brick node is down\n num_down += 1\n down_nodes.append(b)\n\n return {\n \"num_bricks\": num_bricks,\n \"checkpoints_complete_count\": checkpoints_complete_count,\n \"num_faulty\": num_faulty,\n \"num_down\": num_down,\n \"ok\": ((num_active_bricks == checkpoints_complete_count) and\n num_faulty == 0 and num_down == 0),\n \"down_nodes\": down_nodes,\n \"faulty_nodes\": faulty_nodes,\n \"status_ok\": (num_faulty == 0 and num_down == 0)\n }",
"def test_summary(self):\n model = self.model\n model.summary()",
"def get_summary(self) -> dict:\n tuple_df = self.get_results_by_tuple_df().reset_index()\n region_df = self.get_results_by_region_df().reset_index()\n\n value_at_risk_summary = (\n {\n \"eta\": value(self._eta),\n \"beta\": self._beta,\n \"alpha\": self._alpha,\n }\n if self._beta != 0.0\n else {}\n )\n return {\n \"status\": LpStatus[self._model.status],\n \"objective\": value(self._model.objective),\n \"expected_profit\": value(self._expected_profit),\n **value_at_risk_summary,\n # the values belows are summed up for all possible scenarios\n \"n_trips_avg\": tuple_df[\"trips\"].sum() / self._n_scenarios,\n \"n_unfilled_demand_avg\": (tuple_df[\"demand\"] - tuple_df[\"trips\"]).sum()\n / self._n_scenarios,\n \"demand_avg\": tuple_df[\"demand\"].sum() / self._n_scenarios,\n \"n_parking_avg\": region_df[\"n_parking\"].sum() / self._n_scenarios,\n \"n_relocations_avg\": tuple_df[\"relocations\"].sum() / self._n_scenarios,\n }",
"def get_model_summaries(self):\n\n print(\"[INFO] Printing Model Summaries\\n\")\n\n # Print model summaries\n print(self.generator.layers[1].summary(), end = \"\\n\\n\")\n print(self.discriminator.layers[1].summary(), end = \"\\n\\n\")\n\n # Create blank lists\n gen, disc = [], []\n\n # Get the model summaries and append to the lists\n self.generator.layers[1].summary(print_fn=lambda x: gen.append(x))\n self.discriminator.layers[1].summary(print_fn=lambda x: gen.append(x))\n\n # Save lists to a text file\n with open(get_path(f\"{self.ckpt_path}/{self.file_name}/model_summaries.txt\"), \"w\") as f:\n\n for item in gen:\n f.write(f\"{item}\\n\")\n\n f.write(\"\\n\")\n\n for item in disc:\n f.write(f\"{item}\\n\")\n\n return None",
"def figures_of_merit(self):\n\n test_len = len(self.data.X_test)\n\n TP = self.matrix[1][1]\n TN = self.matrix[0][0]\n FP = self.matrix[0][1]\n FN = self.matrix[1][0]\n\n TPP = (TP * 100)/test_len\n FPP = (FP * 100)/test_len\n FNP = (FN * 100)/test_len\n TNP = (TN * 100)/test_len\n\n specificity = TN/(TN+FP)\n\n misc = FP + FN\n miscp = (misc * 100)/test_len\n\n self.helpers.logger.info(\n \"True Positives: \" + str(TP) + \"(\" + str(TPP) + \"%)\")\n self.helpers.logger.info(\n \"False Positives: \" + str(FP) + \"(\" + str(FPP) + \"%)\")\n self.helpers.logger.info(\n \"True Negatives: \" + str(TN) + \"(\" + str(TNP) + \"%)\")\n self.helpers.logger.info(\n \"False Negatives: \" + str(FN) + \"(\" + str(FNP) + \"%)\")\n\n self.helpers.logger.info(\n \"Specificity: \" + str(specificity))\n self.helpers.logger.info(\n \"Misclassification: \" + str(misc) + \"(\" + str(miscp) + \"%)\")",
"def extract_results():\n reader = easyocr.Reader(['en']) # need to run only once to load model into memory\n DIR = \"/data/\"\n results = {}\n for folder in os.listdir(DIR):\n results[folder] = {}\n sub_path = os.path.join(DIR, folder)\n for filename in os.listdir(sub_path):\n file_path = os.path.join(DIR, folder, filename)\n image = cv2.imread(file_path)\n data = reader.readtext(preprocess_image(image), detail=1)\n if not data:\n data = \"missing_value\"\n print(data)\n else:\n result = data[0][1]\n proba = data[0][2]\n print(f\"{result} - {round(proba*100, 2)}%\")\n image = cv2.imread(file_path)\n results[folder][filename[:-4]] = [result, round(proba*100, 2)]\n return results",
"def _parse_stages(self):\n patterns = [r\"\\s+\\d+:\\s+(?P<name>.*):\"]\n for prefix in [\"time\", \"flop\", \"mess\", \"mess_len\", \"rdct\"]:\n pattern = (r\"\\s+(?P<{prefix}>{sci_num})\"\n r\"\\s+(?P<{prefix}_percent>{percent})\"\n .format(prefix=prefix,\n sci_num=Pattern.scientific_notation, \n percent=Pattern.percent))\n patterns.append(pattern)\n\n matches = re.finditer(\"\".join(patterns), self._text)\n\n return [match.groupdict() for match in matches]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the raw or filtered data of each detected event after centering to a specific timepoint.
|
def get_sync_events(
self, center="Peak", time_before=0.4, time_after=0.4, filt=(None, None), mask=None
):
from yasa.others import get_centered_indices
assert time_before >= 0
assert time_after >= 0
bef = int(self._sf * time_before)
aft = int(self._sf * time_after)
if any(filt):
data = mne.filter.filter_data(
self._data, self._sf, l_freq=filt[0], h_freq=filt[1], method="fir", verbose=False
)
else:
data = self._data
# Apply mask
mask = self._check_mask(mask)
masked_events = self._events.loc[mask, :]
time = np.arange(-bef, aft + 1, dtype="int") / self._sf
# Get location of peaks in data
peaks = (masked_events[center] * self._sf).astype(int).to_numpy()
# Get centered indices (here we could use second channel as well).
idx, idx_valid = get_centered_indices(data[0, :], peaks, bef, aft)
# If no good epochs are returned raise a warning
assert len(idx_valid), (
"Time before and/or time after exceed data bounds, please "
"lower the temporal window around center."
)
# Initialize empty dataframe
df_sync = pd.DataFrame()
# Loop across both EOGs (LOC and ROC)
for i, ch in enumerate(self._ch_names):
amps = data[i, idx]
df_chan = pd.DataFrame(amps.T)
df_chan["Time"] = time
df_chan = df_chan.melt(id_vars="Time", var_name="Event", value_name="Amplitude")
df_chan["Channel"] = ch
df_chan["IdxChannel"] = i
df_sync = pd.concat([df_sync, df_chan], axis=0, ignore_index=True)
return df_sync
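A minimal, self-contained sketch of the same centering-and-reshaping idea on synthetic data; the sampling frequency, signal and peak positions below are invented for illustration, and the yasa/mne machinery is deliberately left out.

import numpy as np
import pandas as pd

sf = 100.0                                   # assumed sampling frequency (Hz)
data = np.sin(np.linspace(0, 20, 2000))      # one fake channel
peaks = np.array([500, 1000, 1500])          # fake event peaks, in samples
bef = aft = int(sf * 0.4)                    # 0.4 s before / after each peak

time = np.arange(-bef, aft + 1) / sf
idx = peaks[:, None] + np.arange(-bef, aft + 1)   # (n_events, n_samples)
amps = data[idx]                                  # centered epochs

df_sync = pd.DataFrame(amps.T)
df_sync["Time"] = time
df_sync = df_sync.melt(id_vars="Time", var_name="Event", value_name="Amplitude")
print(df_sync.head())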
|
[
"def get_cursor_data(self, event):\n xmin, xmax, ymin, ymax = self.get_extent()\n if self.origin == 'upper':\n ymin, ymax = ymax, ymin\n arr = self.get_array()\n data_extent = mtransforms.Bbox([[ymin, xmin], [ymax, xmax]])\n array_extent = mtransforms.Bbox([[0, 0], arr.shape[:2]])\n trans = mtransforms.BboxTransform(boxin=data_extent,\n boxout=array_extent)\n y, x = event.ydata, event.xdata\n i, j = trans.transform_point([y, x]).astype(int)\n # Clip the coordinates at array bounds\n if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):\n return None\n else:\n return arr[i, j]",
"def getRawForceTriggerData(self,count=1000):\n dat = self.dev.dma_events(count, 1024, force_trig=True)\n print \"Acquisition complete.\"\n return surf_dataset.buildDataset(dat, count)",
"def datasetEvents(self, dataset, eventSize=1024):\n return np.reshape(dataset['data'], (dataset['count'], eventSize))",
"def _get_raw_events(self, subsample=False):\n if subsample:\n return self._raw_events[self.subsample_indices]\n else:\n return self._raw_events",
"def running_mean(data, npoints): \n y = np.convolve(data,1.0/npoints*np.ones(npoints),mode=\"same\")\n return y[npoints/2:-npoints/2]",
"def raw(self, evt) -> Array3d:\n if evt is None: return None\n segs = self._segments(evt) # dict = {seg_index: seg_obj}\n if is_none(segs, 'self._segments(evt) is None'): return None\n return np.stack([segs[k].raw for k in self._segment_numbers])",
"def events(data, mNp):\n # t0 = time.time()\n s = []\n pixels = []\n sshape = np.shape(data)\n if len(sshape) == 3:\n nframes, nx, ny = np.shape(data)\n nx = nx * ny\n\n for i in range(nframes):\n matr = np.ravel(data[i,:,:])\n msumpix, mpix = eigerpix(matr, mNp, nx)\n mpix = mpix[:msumpix]\n pixels.append(mpix)\n s.append(msumpix)\n\n if len(sshape) == 2:\n nx, ny = np.shape(data)\n nx = nx * ny\n matr = np.ravel(data)\n msumpix, mpix = eigerpix(matr, mNp, nx)\n mpix = mpix[:msumpix]\n pixels.append(mpix)\n s.append(msumpix)\n\n # print(\"Compaction time %f\" % (time.time()-t0))\n\n return pixels, s",
"def __get_first_center(self):\n\n # Initialize list with uniform probabilities\n probabilities = [];\n\n # Fill probability list\n for i in range(len(self.__data)):\n probabilities.append((i + 1) / len(self.__data));\n\n return self.__data[self.__get_uniform(probabilities)];",
"def ts_center(images, kernel):\n counts = np.asanyarray(images['counts'])\n background = np.asanyarray(images['background'])\n kernel = kernel / kernel.sum()\n\n assert counts.shape == kernel.shape\n assert background.shape == kernel.shape\n\n C0 = stats.cash(counts, background)\n out = fit_amplitude(counts, background, kernel)\n C1 = stats.cash(counts, background + out['amplitude'] * kernel)\n # Cash is a negative log likelihood statistic,\n # thus the minus in the TS formula here\n out['ts'] = - 2 * (C1 - C0)\n return out",
"def callback_points(self, data):\n\t\t\n\t\tcloud_msg = pc2.read_points(data, field_names = (\"x\", \"y\", \"z\"), skip_nans=False)\n\t\tcloud_data = []\n\t\tfor p in cloud_msg:\n\t\t cloud_data.append([p[0],p[1],p[2]])\n\t\tcloud_data = np.array(cloud_data)\n\t\tself.cloud2 = np.reshape(cloud_data, [640, 480,3], order='F')",
"def find_roi(self):\n self.logger.info(\"Running find_roi to center frog trace around data\")\n I_tot = self.scan_proc_data.sum()\n xr = np.arange(self.scan_proc_data.shape[0])\n x_cent = (xr * self.scan_proc_data.sum(1)).sum() / I_tot\n yr = np.arange(self.scan_proc_data.shape[1])\n y_cent = (yr * self.scan_proc_data.sum(0)).sum() / I_tot\n self.logger.debug(\"Centroid position: {0:.1f} x {1:.1f}\".format(x_cent, y_cent))\n xw = np.floor(np.minimum(x_cent - xr[0], xr[-1] - x_cent)).astype(np.int)\n yw = np.floor(np.minimum(y_cent - yr[0], yr[-1] - y_cent)).astype(np.int)\n x0 = np.floor(x_cent - xw).astype(np.int)\n x1 = np.floor(x_cent + xw).astype(np.int)\n y0 = np.floor(y_cent - yw).astype(np.int)\n y1 = np.floor(y_cent + yw).astype(np.int)\n self.scan_roi_data = self.scan_proc_data[x0:x1, y0:y1]\n self.time_roi_data = self.time_data[x0:x1]\n self.wavelength_roi_data = self.wavelength_data[y0:y1]\n self.controller.scan_raw_data = self.scan_raw_data\n self.controller.scan_proc_data = self.scan_proc_data\n self.controller.scan_roi_data = self.scan_roi_data\n self.controller.wavelength_roi_data = self.wavelength_roi_data\n self.controller.time_roi_data = self.time_roi_data",
"def dollar_stip_extractor(self):\n gaussian = []\n ev_list, od_list = self.get_gabor_filter(0.8, range(len(self.frames)))\n\n for i in range(len(self.frames)):\n img = filters.gaussian(self.frames[i], sigma=1.5)\n gaussian.append(img)\n gaussian = np.array(gaussian)\n\n t, x, y = gaussian.shape\n stips = np.zeros([t + t - 1, x, y])\n for r in range(x):\n for c in range(y):\n stips[:, r, c] = np.convolve(gaussian[:, r, c], ev_list, mode='full') + np.convolve(\n gaussian[:, r, c], od_list, mode='full')\n coordinates = []\n print(int(t / 2))\n for i in range(int(t / 2), 2 * t - 1 - (int(t / 2)), 1):\n spatial_co = feature.peak_local_max(stips[i], min_distance=5, num_peaks=10)\n for co in spatial_co:\n coordinates.append((co[0], co[1], i - int(t / 2)))\n return gaussian, coordinates",
"def perform_centering(self):\r\n centered_data = self.data - np.repeat(self.mean_data[:, np.newaxis], self.data.shape[1], axis=1) + self.weight\r\n return centered_data",
"def get_obs(self):\n return self.map[(self.attention_x - self.attention_size//2):(self.attention_x + 1 + self.attention_size//2),\n (self.attention_y - self.attention_size//2):(self.attention_y + 1 + self.attention_size//2), :]\n # return self.map",
"def __call__(self, data: np.ndarray, threshold: float):\n # Find data in the on-time range\n ontime_inds = np.logical_and(data[:, 1] < self.tmax, data[:, 1] >= self.tmin)\n # Find data in the early range\n early_inds = data[:, 1] >= self.tmax\n # Find data in the late range\n late_inds = np.logical_and(data[:, 1] < self.tmin, data[:, 1] >= 0)\n\n inds = [early_inds, ontime_inds, late_inds]\n temp_output = []\n for ind in inds:\n # Select data for the current time range\n cur_data = data[ind, :]\n # Calculate the number of threshold crossings\n cur_sum = np.sum(cur_data[:, 2] >= threshold)\n if cur_sum > 0:\n temp_output.append(cur_sum)\n else:\n temp_output.append(np.nan)\n # Add the number of patients who were \"missed\"\n temp_output.append(int(all(np.logical_and(data[:, 1] >= 0, data[:, 2] < threshold))))\n return temp_output",
"def get_raw_events(self, event_filter):\n query = model_query(Meter, session=self.session)\n query = make_query_from_filter(query, event_filter,\n require_meter=False)\n events = query.all()\n\n for e in events:\n # Remove the id generated by the database when\n # the event was inserted. It is an implementation\n # detail that should not leak outside of the driver.\n e = row2dict(e)\n del e['id']\n yield e",
"def collectData(self):\n self.logger.trace(\"%s:%s() started... collectionTime=%r\" % (self.name, self.pfuncname(), self.collectionTime))\n self.lastExposureTime = self.collectionTime\n # Reset the zebra box before each arm, so that internal dividers are reset\n self.pvs['SYS_RESET.PROC'].caput(TIMEOUT, 1)\n self.pvs['PC_ARM'].caput(TIMEOUT, 1)\n\n# def setCollectionTime(self):\n \"\"\" Sets the collection time, in seconds, to be used during the next\n call of collectData.\n self.collectionTime defined by DetectorBase \"\"\"\n\n# def getCollectionTime(self):\n \"\"\" Returns the time, in seconds, the detector collects for during the\n next call to collectData()\n self.collectionTime defined by DetectorBase \"\"\"",
"def center(self) -> Tuple[np.ndarray, float]:\n return self._augment(Frame._img(self.center_img_path), self.steering)",
"def __call__(self, data: np.ndarray, threshold: float):\n\n # Find the candidate time points in the data\n if data == []:\n return [np.nan, np.nan]\n else:\n time_inds = np.logical_and(data[:, 1] >= self.tmin, data[:, 1] <= self.tmax)\n # Check that this patient has any data within tmin and tmax\n if any(time_inds):\n # Find the threshold crossings\n crossings = data[time_inds, 2] >= threshold\n # Determine if model output ever crossed the threshold\n pos = int(any(crossings))\n # Determine if model output never crossed the threshold\n neg = int(all(~crossings))\n return [pos, neg]\n # If no candidate time points exist return nan's\n else:\n return [np.nan, np.nan]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the values for each argument in a command.
|
def get_arg_vals(self):
return self.arg_vals
|
[
"def get_args(command):\n\n arglist = subprocess.Popen('for i in %s; do echo $i; done' % command, \n shell=True, \n stdout=subprocess.PIPE).communicate()[0]\n arglist = [i for i in arglist.split('\\n') if i]\n return arglist",
"def get_option_values(self):\n \n class CommandLineOptions(object):\n def __getattr__(self, name):\n # if an attribute can not be found, this is the last function called\n all_option_names=\", \".join(vars(self).keys())\n error_message=\"Unable to find option '{0}' in command line options.\\n\".format(name)\n error_message+=\"The available options are: {0}\".format(all_option_names)\n raise AttributeError(error_message)\n \n # get arguments from the command line (will not run again if already parsed)\n if not self._user_asked:\n self.ask_user()\n \n args=CommandLineOptions()\n for option in list(self._user_arguments.keys()) + list(self._arguments.keys()):\n option = re.sub(r'-', '_', option)\n value = self.get(option)\n setattr(args,option,value)\n \n return args",
"def argv(self):\n optlist = []\n for n in range(self.count):\n optlist.append(self.flag)\n if self.values is not None:\n optlist.append(self.values[n])\n return optlist",
"def extract_args(command):\r\n \r\n for s1, s2 in Matchquotes.findall(command)[:-1]:\r\n for arg in re.findall(r\"\\S+\", s1):\r\n for k,v in expstr_compile[0]: arg = k.sub(v,arg)\r\n yield arg\r\n if s2: yield s2",
"def args(self):\n result = []\n for key in self.conf.keys():\n result.append('--' + str(key))\n for item in self.conf[key]:\n result.append(str(item))\n return result",
"def args(self):\n if self.ready():\n return (self._result['args'], self._result['kwargs'])\n raise AttributeError",
"def read_args(self):\n cmd = []\n for index in sys.argv:\n cmd = cmd + index.split(\"=\")\n cmd.pop(0)\n\n\n for index , item in enumerate(cmd):\n if (index % 2 == 0):\n found = False\n \n if ('--help' == item):\n found = True\n if self.legacy == True:\n print(( self.m_help ))\n raise RuntimeError\n \n for flags in self.m_flags:\n if (item == flags): \n\n found = True\n self.m_commands[flags] = cmd[index+1] \n \n \n \n if not found:\n raise RuntimeError\n # ^^ raise an exception if any bad flag is found instead ^^\n # self.m_errors =True\n # self.m_bad_flags.append(item)",
"def _parse_args(self, cmd_name, cmd_str):\n\n args = []\n scanner = Scanner(cmd_str)\n\n for kind in getattr(self, cmd_name)._sig:\n if kind is str:\n args.append(scanner.next())\n elif kind is Ellipsis:\n args.append(scanner.nextLine())\n\n return args",
"def get_arguments(self) -> dict:\n pass",
"def get_args(popen_mock):\n cmd_args = \" \".join(popen_mock.call_args[0][0])\n return cmd_args, popen_mock.call_args[1]['env']",
"def get_args(self):\n if len(sys.argv) != self.num_expected_args + 1:\n # There are not enough arguments\n # Default to default_values \n return self.default_values\n \n # Use the provided arguments\n return sys.argv[1:]",
"def get_args(self, argset):\n args = []\n kwargs = {}\n for element in argset or []:\n if isinstance(element, dict):\n kwargs.update(element)\n else:\n args.append(element)\n return args, kwargs",
"def get_values(self):\n return dict(self._arg_values)",
"def get_arguments(self):\n self.__validate_clause()\n return map(lambda item: Entity(item), self.__item[PARAMS:])",
"def args_to_list(self):\n arg_list = [self.name]\n for arg_name, arg_value in self.args.items():\n if arg_value is None:\n arg_list.append(arg_name)\n else:\n arg_list.append(arg_name)\n arg_list.append(arg_value)\n return arg_list",
"def command_line_arguments():\n return sys.argv",
"def GetCommandLineOptions(self):\n return self.args_",
"def Args(pyparseSymbol):\n\treturn pyparseSymbol.setResultsName(\"args\", listAllMatches=True)",
"def args(self) -> Tuple[Any, ...]:\n args: List = list()\n argsappend = args.append\n argsextend = args.extend\n paramsget = self.parameters.__getitem__\n argumentsget = self.arguments.__getitem__\n for name in self._argnames:\n kind = paramsget(name).kind\n arg = argumentsget(name)\n if kind == VAR_POSITIONAL:\n argsextend(arg)\n else:\n argsappend(arg)\n return tuple(args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process inputs in a batch, storing the output or exception in the buffer. Blocks until the batch is ready to be processed; when it is, a handler is called on the input data. If the handler raises an exception, that exception is stored in every DataRequest inside the buffer; otherwise the handler's return value is stored on each individual DataRequest object.
|
def _process_in_batch(self) -> None:
    if not self._handler:
        raise HandlerNotSet()
    start_at = time.time()
    buffer = self._wait_buffer_ready()
    elapsed_time = time.time() - start_at
    # When _wait_buffer_ready() is stopped the buffer could be empty;
    # avoid calling the handler with an empty list.
    if not buffer:
        return
    buffer_size = len(buffer)
    try:
        input_data = buffer.get_inputs()
        start_at = time.time()
        batch_output = self._handler(input_data)
        elapsed_time = time.time() - start_at
        output_size = len(batch_output)
        if buffer_size != output_size:
            # This exception is going to be set in every DataRequest
            raise BadBatchOutputSize(buffer_size, output_size)
    except Exception as ex:
        logger.warning("An exception occurred while processing %s inputs", buffer_size)
        buffer.set_exception(ex)
    else:
        buffer.set_outputs(batch_output)
        logger.debug("Processed %s elements in %s seconds", buffer_size, elapsed_time)
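The handler contract implied above can be illustrated with a tiny sketch: a batch handler receives the list of buffered inputs and must return exactly one output per input, which is what the size check guards. The handler below is hypothetical; the real buffer and DataRequest classes are not shown.

def square_handler(inputs):
    # batch handler: list in, list of the same length out
    return [x * x for x in inputs]

batch_inputs = [1, 2, 3]
batch_outputs = square_handler(batch_inputs)
assert len(batch_outputs) == len(batch_inputs)   # mirrors the BadBatchOutputSize check
print(batch_outputs)                             # [1, 4, 9]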
|
[
"def process_batch(self, batch: BatchType) -> None:\n raise NotImplementedError",
"def process_batch(self, batch: List[Dict[str, Any]]) -> List[Response]:\n pass",
"def process_data(self, data):\n for line in self.buffer.process_data(data):\n try:\n self.process_line(line)\n except Exception as e:\n on_exception.send(self, e=e)",
"def test_process_batch(self):\n batch = next(iter(self.instance.get_loader(batch_size=self.batch_size)))\n self.instance.process_batch(batch=batch)",
"def _perform_batch_inference(self, input_data, output_data, **kwargs):\n batch_strategy = kwargs[\"BatchStrategy\"]\n max_payload = int(kwargs[\"MaxPayloadInMB\"])\n data_source, batch_provider = self._prepare_data_transformation(input_data, batch_strategy)\n\n # Output settings\n accept = output_data[\"Accept\"] if \"Accept\" in output_data else None\n\n working_dir = self._get_working_directory()\n dataset_dir = data_source.get_root_dir()\n\n for fn in data_source.get_file_list():\n\n relative_path = os.path.dirname(os.path.relpath(fn, dataset_dir))\n filename = os.path.basename(fn)\n copy_directory_structure(working_dir, relative_path)\n destination_path = os.path.join(working_dir, relative_path, filename + \".out\")\n\n with open(destination_path, \"wb\") as f:\n for item in batch_provider.pad(fn, max_payload):\n # call the container and add the result to inference.\n response = self.local_session.sagemaker_runtime_client.invoke_endpoint(\n item, \"\", input_data[\"ContentType\"], accept\n )\n\n response_body = response[\"Body\"]\n data = response_body.read()\n response_body.close()\n f.write(data)\n if \"AssembleWith\" in output_data and output_data[\"AssembleWith\"] == \"Line\":\n f.write(b\"\\n\")\n\n move_to_destination(working_dir, output_data[\"S3OutputPath\"], self.name, self.local_session)\n self.container.stop_serving()",
"def process_batch_requests(self, batch_environ, start_response):\n payload = self._read_post_payload(batch_environ)\n requests = payload.get('batch', [])\n\n responses = []\n for request in requests:\n if not self._is_allowed_route(request['url']):\n responses.append(self._disallowed_route_response(request['url']))\n continue\n\n request_environ = self._build_request_environ(batch_environ, request)\n response = self._process_batch_request(request, request_environ, start_response)\n responses.append(response)\n\n batch_response_body = smart_str(json.dumps(responses))\n start_response('200 OK', [\n ('Content-Length', len(batch_response_body)),\n ('Content-Type', 'application/json'),\n ])\n return [batch_response_body]",
"def _process_batch(self, batch_data: List[tuple]) -> Tuple[Tensor, ...]:\n unpacked = zip(*batch_data)\n return tuple(judo.as_tensor(val) for val in unpacked)",
"def generate_batch_from_buffer(self, batch_size: int) -> BatchedProcessedInputs:\n pass",
"def process_packs(\n self, data_iter: Iterator[PackType]) -> Iterator[PackType]:\n buf = ProcessBuffer(data_iter, len(self._processors))\n\n if len(self.processors) == 0:\n yield from data_iter\n else:\n for job in buf:\n if not job.is_poison:\n s = self._selectors[job.step_num]\n for c_pack in s.select(job.pack):\n self._processors[job.step_num].process(c_pack)\n else:\n # Pass the poison pack to the processor, so they know this\n # is ending.\n self._processors[job.step_num].flush()\n\n # Put the job back to the process queue, if not success, that\n # means this job is done processing.\n if not buf.queue_process(job):\n if not job.is_poison:\n yield job.pack",
"def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n fake_imgs = []\n for pred in data_samples:\n fake_img_ = pred\n # get ema/orig results\n if self.sample_model in fake_img_:\n fake_img_ = fake_img_[self.sample_model]\n # get specific fake_keys\n if (self.fake_key is not None and self.fake_key in fake_img_):\n fake_img_ = fake_img_[self.fake_key]\n else:\n # get img tensor\n fake_img_ = fake_img_['fake_img']\n fake_imgs.append(fake_img_)\n fake_imgs = torch.stack(fake_imgs, dim=0)\n feat = self.extract_features(fake_imgs)\n feat_list = list(torch.split(feat, 1))\n self.fake_results += feat_list",
"def populate_buffer(self):\n while len(self.buffer) < self.buffer_size and not self.exhausted():\n try:\n model_input = [next(self.iterator) for _ in range(self.batch_size)]\n new_elems : Iterable[BatchElement] = self.generate(model_input)\n self.buffer += new_elems\n self.buffer_ready = True\n except StopIteration:\n self.iterator_exhausted = True\n break",
"def data_buffer_generic_class(self, inputs, inputs_labels, shuffle_data=False, batch_size=None, is_test=False,\n options=None, create_dataset_flag=False):\n x_batch = []\n y_batch = []\n if is_test:\n # TODO (@gabvaztor) Create process set to create new datasets\n x_batch, y_batch = process_test_set(inputs, inputs_labels, options, create_dataset_flag=create_dataset_flag)\n else:\n if shuffle_data and self.index_buffer_data == 0:\n self.input, self.input_labels = get_inputs_and_labels_shuffled(self.input, self.input_labels)\n else:\n self.input, self.input_labels = self.input, self.input_labels # To modify if is out class\n batch_size, out_range = self.get_out_range_and_batch() # out_range will be True if\n # next batch is out of range\n for _ in range(batch_size):\n x, y = process_input_unity_generic(self.input[self.index_buffer_data],\n self.input_labels[self.index_buffer_data],\n options)\n x_batch.append(x)\n y_batch.append(y)\n self.index_buffer_data += 1\n x_batch = np.asarray(x_batch)\n y_batch = np.asarray(y_batch)\n if out_range: # Reset index_buffer_data\n pt(\"index_buffer_data OUT OF RANGE\")\n self.index_buffer_data = 0\n return x_batch, y_batch",
"def next_batch(data_iter, data_loader, curr_epoch, device):\n try:\n data = data_iter.next()\n if len(data) == 2:\n inputs, targets = data\n perturbed_inputs = None\n elif len(data) == 3:\n inputs, targets, perturbed_inputs = data\n else:\n raise Exception(\"Data type not matched... Use STN dataset.\")\n\n except StopIteration:\n # Epoch finished.\n curr_epoch += 1\n data_iter = iter(data_loader)\n data = data_iter.next()\n if len(data) == 2:\n inputs, targets = data\n perturbed_inputs = None\n elif len(data) == 3:\n inputs, targets, perturbed_inputs = data\n else:\n raise Exception(\"Data type not matched.\")\n\n inputs, targets = inputs.to(device), targets.to(device)\n perturbed_inputs = perturbed_inputs if perturbed_inputs is None else perturbed_inputs.to(device)\n return inputs, perturbed_inputs, targets, data_iter, curr_epoch",
"def process(self, data, output=None, **kwargs):\n # process the data by the input processor\n data = _process((self.in_processor, data, kwargs))\n # process the data by the output processor and return it\n return _process((self.out_processor, data, output, kwargs))",
"def __call__(self, rpc):\n batch = self.batch\n\n # If RPC has resulted in an exception, propagate that exception to all\n # waiting futures.\n exception = rpc.exception()\n if exception is not None:\n for future in itertools.chain(*batch.values()):\n future.set_exception(exception)\n return\n\n # Process results, which are divided into found, missing, and deferred\n results = rpc.result()\n\n # For all deferred keys, batch them up again with their original\n # futures\n if results.deferred:\n next_batch = _get_lookup_batch()\n for key in results.deferred:\n next_batch.setdefault(key, []).extend(batch[key])\n\n # For all missing keys, set result to _NOT_FOUND and let callers decide\n # how to handle\n for result in results.missing:\n key = result.entity.key\n for future in batch[key]:\n future.set_result(_NOT_FOUND)\n\n # For all found entities, set the result on their corresponding futures\n for result in results.found:\n entity = result.entity\n for future in batch[entity.key]:\n future.set_result(entity)",
"def _predict_batch_worker(self):\n while True:\n ready = connection.wait(self.return_policy_value,timeout=0.001)\n if not ready:\n continue\n data, result_pipes = [], []\n for pipe in ready:\n while pipe.poll():\n data.append(pipe.recv())\n result_pipes.append(pipe)\n\n data = np.asarray(data, dtype=np.float32)\n # print (data.shape)\n \n policy_array, value_array = self.model.predict_on_batch(data)\n # print (policy_array, value_array)\n for pipe, policy, value in zip(result_pipes, policy_array, value_array):\n pipe.send((policy, float(value)))",
"def handle_input(self, instr):\n if type(instr) == bytes: # convert to string\n instr = instr.decode(encoding='utf8', errors='strict') # error can happen here if receiving TLS input over plain TCP\n if self._input_buffer != \"\":\n # will need to move to a list if writev comes around\n instr = self._input_buffer + instr\n self._input_buffer = \"\"\n if self._input_state == WAITING:\n if hdr_end.search(instr): # found one\n rest = self._parse_headers(instr)\n try:\n self.handle_input(rest)\n except RuntimeError:\n self.input_error(error.TooManyMsgsError)\n # we can't recover from this, so we bail.\n else: # partial headers; store it and wait for more\n self._input_buffer = instr\n elif self._input_state == HEADERS_DONE:\n try:\n handler = getattr(self, '_handle_%s' % self._input_delimit)\n except AttributeError:\n raise Exception(\"Unknown input delimiter %s\" % \\\n self._input_delimit)\n handler(instr)\n elif self._input_state == ERROR:\n pass # I'm silently ignoring input that I don't understand.\n else:\n raise Exception(\"Unknown state %s\" % self._input_state)",
"def process(self, data, **kwargs):\n # sequentially process the data\n for processor in self.processors:\n data = _process((processor, data, kwargs))\n return data",
"def _process_inputs(self,\n input_reader,\n shard_state,\n tstate,\n ctx):\n processing_limit = self._processing_limit(tstate.mapreduce_spec)\n if processing_limit == 0:\n return\n\n finished_shard = True\n # Input reader may not be an iterator. It is only a container.\n iterator = iter(input_reader)\n\n while True:\n try:\n entity = iterator.next()\n except StopIteration:\n break\n # Reading input got exception. If we assume\n # 1. The input reader have done enough retries.\n # 2. The input reader can still serialize correctly after this exception.\n # 3. The input reader, upon resume, will try to re-read this failed\n # record.\n # 4. This exception doesn't imply the input reader is permanently stuck.\n # we can serialize current slice immediately to avoid duplicated\n # outputs.\n # TODO(user): Validate these assumptions on all readers. MR should\n # also have a way to detect fake forward progress.\n\n if isinstance(entity, db.Model):\n shard_state.last_work_item = repr(entity.key())\n elif isinstance(entity, ndb.Model):\n shard_state.last_work_item = repr(entity.key)\n else:\n shard_state.last_work_item = repr(entity)[:100]\n\n processing_limit -= 1\n\n if not self._process_datum(\n entity, input_reader, ctx, tstate):\n finished_shard = False\n break\n elif processing_limit == 0:\n finished_shard = False\n break\n\n # Flush context and its pools.\n self.slice_context.incr(\n context.COUNTER_MAPPER_WALLTIME_MS,\n int((self._time() - self._start_time)*1000))\n\n return finished_shard"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrap `get_next_page()` to return a list so it's compatible with the Browser class expectation for `get_browsable`
|
def wrapper_next_page(url):
next_url = get_next_page(url)
if not next_url:
return []
return [next_url]
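Hypothetical usage only; get_next_page() and the URL are assumed to come from the surrounding crawler module.

for url in wrapper_next_page("https://example.com/listings?page=1"):
    print(url)   # either the single next-page URL or nothing at all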
|
[
"def get_next_pages(self, driver):\n return driver.find_elements_by_xpath('//*[@class=\"PagerStyle\"]/td/table/tbody/tr/td/a')",
"async def fetch_paginated(\n client, bearer_token: str, url: str, data_key: str\n) -> List[Dict[str, Any]]:\n results: List[Dict[str, Any]] = []\n\n page_url = url # we'll modify it as we go\n for _ in range(MaxNPages):\n response = await client.get(\n page_url,\n headers={\n \"Authorization\": f\"Bearer {bearer_token}\",\n \"Accept\": \"application/json\",\n },\n )\n response.raise_for_status()\n data = response.json()\n if not isinstance(data, dict):\n raise RuntimeError(\"Intercom did not return a JSON Object\")\n if data_key not in data:\n raise RuntimeError(f'Intercom did not return \"{data_key}\" data')\n\n results.extend(data[data_key])\n\n if \"pages\" in data and data[\"pages\"][\"next\"]:\n page_url = data[\"pages\"][\"next\"]\n else:\n break\n\n return results",
"async def next(self):\n if not self._url_next:\n raise ValueError(f\"Last page has no next page available. (Page {self.page_number})\")\n json = await self._client._get(self._url_next)\n return NeoWsBrowsePage(self._client, json)",
"def paginate(self, response):\n\n #list of url strings for business pages to extract items from\n business_links = response.xpath('//td[@class=\"results_td_address\"]//a/ @href').extract()\n business_requests = [Request(url=urljoin('http://businessdirectory.bizjournals.com/', business_link),\n callback=self.extract) for business_link in business_links]\n\n #url string for the last page, of format <category_name>/page/<int>\n last_page_link = response.xpath('//div[@class=\"last\"]/a/ @href').extract()\n last_page = None\n try:\n last_page = int(last_page_link[0].rsplit('/', 1)[1])\n except IndexError:\n last_page = 1\n log.msg('Unable to find last_page link on {0}'.format(response.url), level=log.DEBUG)\n\n\n try:\n current_resource = response.url.rsplit('/', 1)[-1]\n next_page = int(current_resource)+1\n except Exception:\n #Not an int so must be on page 1\n next_page = 2\n\n #Guessing that we can grab the remaining category pages using the <category>/page/<int> pattern\n page_requests = []\n\n for page in range(next_page, last_page+1):\n page_requests.append(Request(url='http://businessdirectory.bizjournals.com/'+\n urljoin(last_page_link[0], str(page)), callback=self.paginate))\n\n return page_requests+business_requests",
"def next(self):\n self._item = self._input.get(self._item.get(\"next_page_expected\"))",
"def _list_all_pages(endpoint_obj, list_params: dict, *args, **kwargs):\n\n params = list_params.copy()\n\n # set default pagination count if not provided\n if 'count' not in params:\n params['count'] = '200'\n else:\n params['count'] = str(params['count'])\n\n # get first response\n response = endpoint_obj.list(*args, params=params, **kwargs)\n output = list(response.value)\n\n # keep getting pages while they are available\n while response.pagination.has_previous_page():\n time.sleep(1)\n try:\n response = endpoint_obj.list(\n *args,\n params=response.pagination.url_params_previous_page,\n **kwargs)\n\n output += list(response.value)\n except:\n pass\n\n return output",
"def _next_page(self, tags):\r\n selector = self._selectors(\"next\")\r\n next_page = self._get_tag_item(tags.select_one(selector), \"href\")\r\n url = (self._base_url + next_page) if next_page else None\r\n return {\"url\": url, \"data\": None}",
"def get_pages(**kwargs):\n pass",
"def _wikipedia_Page_linkedPages(self):\n return [page for page in toolserver.Generators.getPagelinks(self)]",
"def get_all_links_pages(total_pages):\n\tbase_url = 'http://torrentik.co'\n\tpage_part = '/page/'\n\tlinks_pages = []\n\tfor i in range(1, 2): # int(total_pages) + 1\n\t\turl = base_url + page_part + str(i)\n\t\tlinks_pages.append(url)\n\treturn links_pages",
"def next(self):\n self.pages[self.next_num]",
"def get_page(data, page):\n begin = page * 20\n end = page * 20 + 20\n if begin >= len(data):\n return []\n elif end >= len(data):\n return data[begin:]\n else:\n return data[begin:end]",
"def testNavigationGoIntegration(self):\n \n self.assert_(self.pageLen >= 5, \"Failed crawling more than 5 pages in %s.\" % gUrl )\n \n self.pageLen = 5\n \n iterResultPages = []\n nextResultPages = []\n previousResultPages = []\n stepResultPages = [None]*self.pageLen\n \n \n for i in range(self.pageLen):\n nextResultPages.append(self.crawler.get_page_info())\n if i < self.pageLen-1:\n self.crawler.go_next()\n \n for i in range(self.pageLen):\n previousResultPages.insert(0, self.crawler.get_page_info())\n if i < self.pageLen-1:\n self.crawler.go_previous()\n \n # get page 1, 3, 5, 4, 2\n self.crawler.go_recent()\n stepResultPages[0] = self.crawler.get_page_info()\n self.crawler.go_next(2)\n stepResultPages[2] = self.crawler.get_page_info()\n self.crawler.go_next(2)\n stepResultPages[4] = self.crawler.get_page_info()\n self.crawler.go_previous()\n stepResultPages[3] = self.crawler.get_page_info()\n self.crawler.go_previous(2)\n stepResultPages[1] = self.crawler.get_page_info()\n \n i = 0\n for page in self.crawler:\n iterResultPages.append(page)\n i += 1\n if i==self.pageLen:\n break\n \n # check result #\n for i in range(self.pageLen):\n self.assert_(stepResultPages[i].url == iterResultPages[i].url == \n nextResultPages[i].url == previousResultPages[i].url)\n self.assert_(stepResultPages[i].imageUrls == iterResultPages[i].imageUrls == \n nextResultPages[i].imageUrls == previousResultPages[i].imageUrls)",
"def _get_paginated_results(url):\n results = []\n while True:\n resp = requests.get(url)\n results.extend(_json_resp(resp))\n if \"next\" not in resp.links:\n break\n url = resp.links[\"next\"][\"url\"]\n return results",
"def list_records_next_page(self):\r\n return self._manager.list_records_next_page()",
"def _all_inner(self, fields, limit):\n response = self.session.get(self._get_url(self.table), params=self._get_formatted_query(fields, limit))\n yield self._get_content(response)\n while 'next' in response.links:\n self.url_link = response.links['next']['url']\n response = self.session.get(self.url_link)\n yield self._get_content(response)",
"def get_next_page_of_results(self):\n holding_dictionary = dict((\"http://www.indeed.co.uk\" + str(link.get('href')), False) for link in self.parsed_site.find_all('a') if re.findall(self.next_page_regex, link.get('href')))\n self.new_link_dictionary.update(holding_dictionary)",
"def test_get_multiple_pages_lro(self, client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0].properties.id == 1\n assert items[1].properties.id == 2",
"def next_pagination_page(headers):\n link = headers.get('Link')\n if link is None:\n # If there is no next page, GitHub does not provide 'Link'\n return\n\n parts = link.split(',')\n for part in parts:\n if not part.endswith('rel=\"next\"'):\n continue\n\n matched = _PAGINATION_RE.match(part)\n return int(matched.group(1))",
"def test_api_bucketlist_next_and_previous_page_links(self):\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist1),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist2),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist3),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist4),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps({\"name\":\"Learn Piano\"}),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps({\"name\": \"Learn Guitar\"}),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n get_response = self.client().get('/v1/api/bucketlists/?start=1&limit=3',\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n data = json.loads(get_response.data)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertIn(data['next'], '/v1/api/bucketlists/?start=4&limit=3', \"Next page link not provided\")\n self.assertIn(data['previous'], '', 'Previous link should be empty for start of 1')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determines if the current page is the last one showing listings.
|
def is_last_page(soup):
for li in soup.find_all("li"):
if li.has_attr("class") and li.attrs["class"] == ["next", "ng-hide"]:
return True
return False
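A quick sanity check of the class-based test above; the HTML snippet is made up to mimic a pagination widget whose "next" button Angular has hidden.

from bs4 import BeautifulSoup

html = '<ul><li class="prev">Prev</li><li class="next ng-hide">Next</li></ul>'
soup = BeautifulSoup(html, "html.parser")
print(is_last_page(soup))   # True: the "next" item carries the ng-hide class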
|
[
"def is_last_page(self):\n return 'last' not in self.links",
"def is_last_page(xml):\n #Get information from the page\n #matched=matched_items(xml)\n first_displayed,last_displayed=current_items(xml)\n #Check lastness\n return first_displayed>last_displayed",
"def is_on_last_item(self):\n return self.index == len(self) - 1",
"def is_last(self):\n return self._order == \\\n len(self.parent_node.idevices.get_queryset()) - 1",
"def isLast(self):\n pass",
"def is_only_page(self):\n return self.is_first_page and self.is_last_page",
"def __goToLastPage(self):\n try:\n pagination_tag = self.soup.find('div', 'pagenav')\n if not pagination_tag:\n return\n uri = None\n last_page_tag = pagination_tag.find('a', title=re.compile('Last Page'))\n if last_page_tag:\n uri = last_page_tag['href']\n else:\n last_page_tag = pagination_tag.findAll('a', href=True, text=re.compile('^\\d+$'))\n if last_page_tag:\n uri = last_page_tag[-1].parent['href']\n if not uri:\n log.info(self.log_msg('Post found in only one page'))\n return\n data_dict = dict(parse_qsl(uri.split('?')[-1]))\n if 's' in data_dict.keys():\n data_dict.pop('s')\n self.currenturi = self.__baseuri + 'showthread.php?'+ urlencode(data_dict)\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))",
"def _get_isLastUsedShown(self) -> \"bool\" :\n return _core.SplitButtonControl__get_isLastUsedShown(self)",
"def isLast(entity):",
"def is_last_question(self):\n questions = self.module.question_set\n return self == questions.last()",
"def is_last_module(self):\n modules = self.course.module_set\n return self == modules.last()",
"def is_first_page(self):\n return 'first' not in self.links",
"def __goToLastPage(self):\n try:\n pagination_tag = self.soup.find('div', 'pages')\n if not pagination_tag:\n log.info(self.log_msg('pagination not found, posts exists in current\\\n url%s'%self.currenturi))\n return \n list_of_page_links = pagination_tag.findAll('img', alt='Next')\n if not list_of_page_links:\n log.info(self.log_msg('pagination not found, posts exists in current\\\n url%s'%self.currenturi))\n return\n self.currenturi = 'http://forums.webmd.com' + list_of_page_links[-1].parent['href']\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))",
"def is_last_iter(self, trainer):\n return trainer.iter + 1 == trainer.max_iters",
"def is_resting(self):\n progress = self.time_displaying_page - self.get_minimum_time_to_show_full_page()\n return progress > 0 and progress <= self.RESTING_PERIOD",
"def paginated(self):\n return len(self) > 1",
"def is_last_node(self, node):\n return True if self.get_last_node() == node else False",
"def _is_current_page(self):\n self.selenium.wait_until_location_contains(\"GE_Gift_Entry\", timeout=60, \n message=\"Current page is not Gift Entry landing page\")\n locator=npsp_lex_locators[\"gift_entry\"][\"id\"].format(\"datatable Batches\") \n self.selenium.wait_until_page_contains_element(locator)",
"def scroll_bar_at_bottom(self):\n bar = self.verticalScrollBar()\n if not bar:\n return True\n return bar.value() == bar.maximum()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the real estate listing ID from the URL. If parsing the ID fails, we return a random string.
|
def get_listing_id(url):
match = re.search(r"\/(\w+)$", url)
if match:
return match.group(1)
else:
return "".join(random.choice(ascii_letters) for _ in range(10))
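Illustrative calls only, with invented URLs; the function itself relies on re, random and string.ascii_letters being imported in its module.

print(get_listing_id("https://example.com/listing/AB12CD34"))   # "AB12CD34"
print(get_listing_id("https://example.com/listing/"))           # random 10-letter fallback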
|
[
"def get_listing_id(url):\n match = re.search(r\"\\/([\\dA-Z\\-]*)$\", url)\n if match:\n return match.group(1)\n else:\n return \"\".join(random.choice(ascii_letters) for _ in range(10))",
"def parseID(self,url):\n\tif validateUrl(url):\n\t splitURL = (url).split(\"/\")\n\t itemID = \"BHL-\" + splitURL[4].split('#')[0]\n\telse:\n\t return \"URL not valid\"\n\treturn itemID",
"def get_seamus_id_from_url(url):\n if url.startswith('http://www.npr.org') or url.startswith('http://npr.org'):\n url_parts = url.split('/')\n id = url_parts[-2]\n if id.isdigit():\n return id\n\n return None",
"def parse_unique_id_from_url(page_url):\n\ttokens = page_url.split(\"/\")\n\tlast_token = tokens[len(tokens)-1]\n\tbook_id = \"\"\n\tfor char in last_token:\n\t\tif char.isdigit():\n\t\t\tbook_id += char\n\t\telse:\n\t\t\tbreak\n\treturn int(book_id)",
"def id_from_uri(self, something):\n\t\tif something is None:\n\t\t\treturn None\n\t\t\n\t\tif isinstance(something, (int, long)):\n\t\t\treturn something\n\t\t\n\t\t# get the bare URI\n\t\turi = unicode(something)\n\t\tif '>' == uri[-1:]:\n\t\t\turi = uri[1:-1]\n\t\t\n\t\t# split and return the last component\n\t\treturn os.path.split(uri)[1]",
"def get_id(self, url):\n return self.get_ids([url])[0]",
"def _get_id(self, item):\n link = item.attrib[\"href\"]\n # Searches for a string containing an arbitrary number of text characters (\\D+)\n # then an arbitrary number of figures (\\d+)\n # Extract the figures since this is the fund id\n p = re.compile(\"\\D+(\\d+)\")\n m = p.match(link)\n if m is None:\n # Found nothing\n return \"\"\n fund_id = m.group(1)\n return fund_id",
"def find_id(url, id_sequence):\n # find the parts of the string that match id_sequence\n if re.search(id_sequence, url):\n id_number = re.search(id_sequence, url).group()\n else:\n id_number = None\n return id_number",
"def get_location_id(url: str) -> str:\n loc_id = parse(LOCATION_FORMAT, url)\n if loc_id is not None and not isinstance(loc_id, Match):\n if len(loc_id.spans) != 1: # pragma: no cover\n raise ValueError(f\"Format error: {url}\")\n return loc_id[0]\n raise ValueError(f\"{url} not a recognized format\") # pragma: no cover",
"def get_ticket_id_from_url(self, url):\n re_id = re.compile(r'/ticket/(?P<id>[0-9]+)')\n m = re_id.search(url)\n if m:\n return int(m.group('id'))\n # Print the url for debugging if there is not ticket number\n raise TwillAssertionError(\"No ticket id found in: %s\" % url)",
"def company_id(url):\n p = re.compile('-\\d+')\n aa = re.search(p, url).group()[1:]\n return aa",
"def find_user_id(url):\n html = urllib.request.urlopen(url).read().decode('utf-8')\n\n m = re.search(r\"href=\\\"/services/feeds/photos_public.gne\\?([^\\\"]+)\", html)\n if m:\n h = HTMLParser()\n uid = h.unescape(m.group(1))\n uid = uid[3:uid.index(\"&\")]\n return uid\n else:\n return None",
"def CalcSrcID( url ):\n o = urlparse.urlparse( url )\n if not re.match( \"(\\w+[.])?ft[.]com$\", o[1] ):\n return None\n\n m = art_idpat.search( url )\n if m:\n return 'ft_' + m.group(1)\n m = blog_idpat.search( url )\n if m:\n return 'ftblog_' + m.group(1)\n\n return None",
"def CalcSrcID( url ):\n o = urlparse.urlparse(url)\n if not o[1].lower().endswith( 'latimes.com' ):\n return None\n return url",
"def get_tweet_id(tweet_url):\n tweet_id = re.search(r'\\d+$', tweet_url)\n return tweet_id.group(0)",
"async def id64_from_url(url: StrOrURL, /, session: aiohttp.ClientSession | None = None) -> ID64 | None:\n\n if not (search := URL_REGEX.match(str(url))):\n return None\n\n async with (\n aiohttp.ClientSession() if session is None else nullcontext(session) as session,\n session.get(f\"https://{search['clean_url']}\") as r,\n ):\n text = await r.text()\n\n if search[\"type\"] in USER_URL_PATHS:\n data = JSON_LOADS(match[\"json\"]) if (match := USER_ID64_FROM_URL_REGEX.search(text)) else None\n else:\n data = CLAN_ID64_FROM_URL_REGEX.search(text)\n return ID64(int(data[\"steamid\"])) if data else None",
"async def id64_from_url(url: StrOrURL, session: aiohttp.ClientSession | None = None) -> int | None:\n\n search = URL_REGEX.search(str(url))\n\n if search is None:\n return None\n\n gave_session = session is not None\n session = session or aiohttp.ClientSession()\n\n try:\n if search[\"type\"] in (\"id\", \"profiles\"):\n # user profile\n r = await session.get(search[\"clean_url\"])\n text = await r.text()\n data_match = USER_ID64_FROM_URL_REGEX.search(text)\n data = json.loads(data_match[\"json\"])\n else:\n # clan profile\n r = await session.get(search[\"clean_url\"])\n text = await r.text()\n data = CLAN_ID64_FROM_URL_REGEX.search(text)\n return int(data[\"steamid\"])\n except (TypeError, AttributeError):\n return None\n finally:\n if not gave_session:\n await session.close()",
"def extract_spotify_id(url):\n # Check if the URL is a valid Spotify playlist URL using regex\n pattern = r\"https?://open.spotify.com/playlist/[\\w]+\"\n if not re.match(pattern, url):\n return None\n\n # Parse the URL\n parsed_url = urlparse(url)\n\n # Extract the playlist ID from the URL path\n playlist_id = parsed_url.path.split(\"/\")[-1]\n\n return playlist_id",
"def fileid_from_url(url):\r\n raw_fileid = re.findall(\"~[A-z.]+/[0-9]+\", url)[0][1:]\r\n return raw_fileid.replace('/', ':')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a department for testing.
|
def create_department():
return Department.objects.create(name='Development')
|
[
"def test_create_department_succeeds(self, client, dept_data):\n\n data = dept_data['test_dept']\n response = client.post('/api/v1/department/', data)\n assert response.status_code == 201\n assert response.data['message'] == SUCCESS['create_entry'].format(\n data['name'])",
"def test_department_model(self, add_org, dept_data):\n\n data = dept_data['test_dept']\n data.update({'organization': add_org})\n department = Department(**data)\n department.save()\n assert Department.objects.get(name=data['name'])\n assert len(Department.objects.all()) >= 1",
"def test_create_valid(self):\n dp = baker.make(\"DegreeProgram\")\n rub = baker.make(\"Rubric\")\n f = CreateReportByDept({\n 'year':2017,\n 'degreeProgram':dp.pk,\n 'rubric': rub.pk\n },dept=dp.department.pk\n )\n self.assertTrue(f.is_valid())",
"def setup_db_department():\n # Initialize key variables\n idx_department = 1\n\n # Create a dict of all the expected values\n expected = {\n 'enabled': 1,\n 'name': general.hashstring(general.randomstring()),\n 'idx_department': idx_department,\n 'code': general.hashstring(general.randomstring()),\n }\n\n # Drop the database and create tables\n initialize_db()\n\n # Insert data into database\n data = Department(\n code=general.encode(expected['code']),\n name=general.encode(expected['name']))\n database = db.Database()\n database.add_all([data], 1048)\n\n # Return\n return expected",
"def test_create_department_invalid_values_fails(self, client, dept_data):\n data = dept_data['test_dept']\n data.update({'name': \"$$$\"})\n response = client.post('/api/v1/department/', data)\n assert response.status_code == 400\n assert response.data['message'] == VALIDATION['unsuccessful']\n assert 'name' in response.data['errors']",
"def setUp(self):\n super().setUp()\n self.dept = baker.make_recipe(\"makeReports.department\")",
"def create_departments_for_hospital(sender, instance, created, **kwargs):\n if created:\n departments = list()\n for specialty in Specialty.objects.all():\n departments.append(Department(\n hospital=instance,\n name=\"Department of %s\" % specialty.name,\n specialty=specialty,\n contact_name=instance.contact_name,\n contact_position=instance.contact_position,\n email=instance.email,\n phone=instance.phone,\n extension=instance.extension,\n has_requirement=instance.has_requirement,\n requirement_description=instance.requirement_description,\n requirement_file=instance.requirement_file,\n ))\n \n Department.objects.bulk_create(departments)",
"def test_get_single_department_succeeds(self, client, add_dept):\n\n response = client.get(f'/api/v1/department/{add_dept.id}/')\n assert response.status_code == 200\n assert response.data['data']['name'] == add_dept.name",
"def new (deptCode = None,\n name = None,\n managerID = None,\n mission = None):\n newDepartment = Department (None,\n deptCode,\n name,\n managerID, 0, 1)\n newDepartment.updateMission (None)\n newDepartment.save ()\n newDepartment.updateMission (mission)\n newDepartment.save ()\n return newDepartment",
"def test_create_valid(self):\n c = baker.make(\"College\",active=True)\n f = CreateDepartmentForm({\n 'name':'Psychology',\n 'college': c.pk\n })\n self.assertTrue(f.is_valid())",
"def add_departments():\n logger.info('Working with Department class')\n logger.info('Creating Department records')\n\n DEPT_NUM = 0\n DEPT_NAME = 1\n DEPT_MGR = 2\n\n departments = [\n ('DA', 'Dark Arts', 'Voldemort'),\n ('STU', 'Student', 'Minerva McGonnigal'),\n ('ADM', 'Administration', 'Ministry of Magic'),\n ('EDU', 'Education', 'Albus Dumbledore')\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for dept in departments:\n with database.transaction():\n new_dept = Department.create(\n department_number=dept[DEPT_NUM],\n department_name=dept[DEPT_NAME],\n department_manager=dept[DEPT_MGR])\n new_dept.save()\n logger.info('Database add successful')\n\n logger.info(\n 'Reading and print all Department rows ...')\n for dept in Department:\n logger.info(f'{dept.department_number} : {dept.department_name} manager : {dept.department_manager}')\n\n except Exception as e:\n logger.info(f'Error creating = {dept[DEPT_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()",
"def add_department():\n\tcheck_admin()\n\tadd_dempartment = True\n\tform = DepartmentForm()\n\tif request.method == 'POST' and form.validate():\n\t\tdepartment = Department(name=form.name.data, description=form.description.data)\n\n\t\ttry:\n\t\t\t# Add the department in the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash('The department has successfully added in the database')\n\t\texcept:\n\t\t\t# In case there are some error (the department is in the database).\n\t\t\tflash(\"The department you are trying to add is already exist in the database\")\n\t\treturn redirect(url_for('admin.list_departments'))\n\n\treturn render_template('admin/departments/department.html', action='Add', form=form\n\t\t, add_department=add_department, title='Add Department')",
"def test_get_department_list_succeeds(self, client, add_dept):\n response = client.get('/api/v1/department/')\n assert response.status_code == 200\n assert len(response.data['data']) >= 1",
"def create_departments_for_specialty(sender, instance, created, **kwargs):\n if created:\n departments = list()\n for hospital in Hospital.objects.all():\n departments.append(Department(\n hospital=hospital,\n name=\"Department of %s\" % instance.name,\n specialty=instance,\n contact_name=hospital.contact_name,\n contact_position=hospital.contact_position,\n email=hospital.email,\n phone=hospital.phone,\n extension=hospital.extension,\n has_requirement=hospital.has_requirement,\n requirement_description=hospital.requirement_description,\n requirement_file=hospital.requirement_file,\n ))\n Department.objects.bulk_create(departments)",
"def test_faculty_str(self):\n faculty = models.Faculty.objects.create(\n faculty_id='CS1234',\n name='TestFaculty',\n designation='HOD',\n department='Computer Science and Engineering',\n central_responsibility='',\n status='',\n date_of_joining='2008-05-28',\n mobile_number='9187654321',\n email='hodbit@gmail.com',\n FAP_2021_Score=0.54,\n FRP_2021=1.00,\n FRS_2021=14\n )\n self.assertEqual(str(faculty), faculty.department)",
"def department(cls):\r\n return cls.random_element(cls.departments)",
"def test_create_investment(self):\n pass",
"def test_delete_department_succeeds(self, client, add_dept):\n\n response = client.delete(f'/api/v1/department/{add_dept.id}/')\n assert response.status_code == 200\n assert response.data['message'] == SUCCESS['delete_entry'].format('department')",
"def test_portals_id_designs_post(self):\n pass",
"def createTable (self):\n self.server.sql (\"\"\"create table Department (\n departmentID numeric (8, 0) identity not null,\n deptCode int,\n name varchar (50),\n mission text null,\n managerID numeric (8, 0) null)\"\"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method registers signal handlers so the core can shut down gracefully (on SIGINT or SIGTERM) before it terminates.
|
def register_signal_handler(self):
signal.signal(signal.SIGINT, self.quit_gracefully)
signal.signal(signal.SIGTERM, self.quit_gracefully)
return
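A minimal, runnable sketch of the same pattern; the Core class and its quit_gracefully() method below are hypothetical stand-ins for the real core.

import signal
import sys

class Core:
    def quit_gracefully(self, signum, frame):
        print(f"Received signal {signum}, shutting down")
        sys.exit(0)

    def register_signal_handler(self):
        signal.signal(signal.SIGINT, self.quit_gracefully)
        signal.signal(signal.SIGTERM, self.quit_gracefully)

Core().register_signal_handler()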
|
[
"def _setup_signal_handling(self):\n signal.signal(signal.SIGINT, self._signal_handler)\n signal.signal(signal.SIGQUIT, self._signal_handler)",
"def configure_signals():\n\n def stopping_handler(signum, frame):\n \"\"\"Handle signal and exit\"\"\"\n frame_data = format_frame(frame)\n logging.info(\"interrupt signal %s, frame %s received, stopping\", signum, frame_data)\n app_exit()\n\n signal.signal(signal.SIGINT, stopping_handler)\n signal.signal(signal.SIGTERM, stopping_handler)",
"def signal_handler(signum, frame):\n\n raise ProgramKilledError",
"def setup(self):\r\n\t\tif self.hasSignalModule and not self.signalsRegistered:\r\n\t\t\t# Jython does not support all signals, so we only use\r\n\t\t\t# the available ones\r\n\t\t\tsignals = ['SIGINT', 'SIGHUP', 'SIGABRT', 'SIGQUIT', 'SIGTERM']\r\n\t\t\timport signal\r\n\t\t\tfor sig in signals:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsignal.signal(getattr(signal, sig), self._shutdown)\r\n\t\t\t\t\tself.signalsRegistered.append(sig)\r\n\t\t\t\texcept Exception, e:\r\n\t\t\t\t\tLogger.Err(\"[!] daemonwatch.Signals._registerSignals:%s %s\\n\" % (sig, e))",
"def install_termination_logging_signal_handlers():\n\n # noinspection PyUnusedLocal\n def sig_handler(signum, frame):\n signames = [\n n for n, v in signal.__dict__.items()\n if n.startswith('SIG') and v == signum\n ]\n signame = signames and ' (%s)' % signames[0] or ''\n logging.info(\"Terminating with signal %d%s.\" % (signum, signame))\n sys.exit(2) # calls exit_function\n\n for s in range(100):\n if s not in (signal.SIGCHLD, signal.SIGURG, signal.SIGWINCH):\n try:\n signal.signal(s, sig_handler)\n except: # noqa\n pass",
"def set_signal_handler():\n def signal_handler(signalnum: int, frame: TypeVar(\"Frame\")):\n \"\"\"Handle Ctrl-C signals(KeyboardInterupt) gracefully.\n\n Parameters\n ----------\n signalnum: int\n signam identifier\n frame: Frame\n current stack frame\n \"\"\"\n print(\"\\nAborting by user request...\")\n sys.exit()\n\n signal.signal(signal.SIGINT, signal_handler)",
"def sigterm_handler(signum, stack):\n poap_log(\"INFO: SIGTERM Handler\")\n if (len(options[\"install_path\"]) != 0 and options[\"mode\"] != \"personality\"):\n abort(\"Cleaning up rpms\")\n else:\n cleanup_files()\n log_hdl.close()\n exit(1)",
"def signal_handler(sig, frame):\n print('\\nBye! See you soon...')\n sys.exit(0)",
"def signal_handler(self, signum, frame):\n exit(0)",
"def signalHandler(signal, frame):\r\n global Processes\r\n print('Stopping Lagramge!')\r\n for proc in Processes:\r\n proc.send_signal(signal)",
"def signal_handler(sig_num, frame):\n\n global exit_flag\n if sig_num == signal.SIGINT:\n logger.warning(\n \" SIGINT recieved from the os: program terminated w/ ctr-c\"\n )\n exit_flag = True\n elif sig_num == signal.SIGTERM:\n logger.warning(\" SIGTERM recieved from the os: program terminated\")\n exit_flag = True",
"def sigterm_handler(_signo, _stack_frame):\n do_exit()",
"def _handle_sigterm(signum, stackframe):\n global _HANDLING\n if _HANDLING:\n return\n _HANDLING = 1\n _cleanup()\n # call the previous handler\n if _PREV_SIGTERM is not None:\n signal.signal(signal.SIGTERM, _PREV_SIGTERM)\n os.kill(os.getpid(), signum)",
"def signal_handler(signum, frame):\n print(glog.red(f\"Caught signal {signal.Signals(signum).name}. Exiting...\"))\n close_section_logs()\n sys.exit(0)",
"def _sigint_handler(self, signum, frame):\n self._sigint_caught = True\n logger.debug(\" >> SIGINT caught.\")",
"def restore(self):\n signal.signal(signal.SIGINT, self.sig_handler[signal.SIGINT])\n signal.signal(signal.SIGTERM, self.sig_handler[signal.SIGTERM])",
"def sigterm_handler(signum, frame):\n global monitor\n logmain = logging.getLogger('sigterm_handler')\n logmain.info('Received TERM signal, cleaning up')\n monitor.cleanup()\n exit(0)",
"def run_term_signal_handler(sig, frame):\n # pylint: disable=unused-argument\n if _config.VERBOSE_PROCESSES_ENABLED:\n print_out(\"Run process: Received termination signal ({})\".format(sig))\n\n # This triggers the registered exit handler run_exit_handler()\n raise SystemExit(1)",
"def __init__(self):\n self._sigint_caught = False\n self._sigint_response = None\n signal.signal(signal.SIGINT, self._sigint_handler)",
"def signals(*signames):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find and return positions of pattern in genome.
|
def positions_of_pattern_in_genome(pattern, genome):
return [i for i in range(len(genome) - len(pattern) + 1) if genome[i:i+len(pattern)] == pattern]
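# Worked example (illustrative, not part of the source): the comprehension
# slides a len(pattern)-wide window across the genome and keeps every start
# index of an exact match.
#     positions_of_pattern_in_genome('ATA', 'GATATATGC')  ->  [1, 3]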
|
[
"def pattern_indices(pattern,gene_sequence):\n indices = []\n pattern_seen = False\n pattern_start_index = 0\n for i in range(0,len(gene_sequence)-len(pattern)+1):\n tmp = gene_sequence[i:i+len(pattern)]\n if(tmp == pattern):\n indices.append(i) \n return indices",
"def get_pattern_position(pattern,in_text):\n if in_text.find(pattern) == -1 :\n return in_text.find(pattern)\n else:\n return in_text.find(pattern)+1",
"def get_reference_pattern_coordinates(self, pattern):\r\n try:\r\n matched_pattern = find(self._pattern_finder._find_pattern(pattern))\r\n coordinates = (matched_pattern.getX(), matched_pattern.getY(), matched_pattern.getW(), matched_pattern.getH())\r\n return coordinates\r\n except FindFailed, err:\r\n raise AssertionError(\"Unable to find matching pattern '%s'.\" % (pattern))",
"def ApproximatePatternMatching(Pattern, Text, d):\r\n positions = []\r\n for i in range(len(Text) - len(Pattern) + 1):\r\n x=Pattern\r\n y = Text[i:i + len(Pattern)]\r\n if HammingDistance(x,y)<=d:\r\n positions.append(i)\r\n return positions",
"def find_all(s, pattern):\n shift_on_match = 1\n i = 0\n indexes = []\n while 1:\n i = string.find(s, pattern, i)\n if i >= 0:\n indexes.append(i)\n i = i + shift_on_match\n else:\n break\n return indexes",
"def findSite(seq, restrSite):\n posList = []\n for i in range(0, len(seq)-len(restrSite)+1):\n subseq = seq[i:i+len(restrSite)]\n #print subseq==restrSite, subseq, restrSite,\"<br>\"\n\n # JP does not want any potential site to be suppressed\n #if i<len(restrSite):\n #isMatch = patMatch(subseq, restrSite, len(restrSite)-i-1)\n #else:\n #isMatch = patMatch(subseq, restrSite)\n isMatch = patMatch(subseq, restrSite)\n\n if isMatch:\n posList.append( (i, i+len(restrSite)) )\n return posList",
"def find_pattern_in_process_memory(pattern, pid, read_chunk=0xffff, start_addr=0, end_addr=0x7fffffff):\n found_addresses = []\n buf = ctypes.create_string_buffer(read_chunk)\n bytes_read = ctypes.c_size_t()\n process_handle = OpenProcess(PROCESS_ALL_ACCESS, False, pid)\n # scan memory\n for i in xrange(start_addr, end_addr, read_chunk):\n base_address = i\n res = ReadProcessMemory(process_handle, base_address, buf, read_chunk, ctypes.byref(bytes_read))\n if res:\n pos = 0\n while pos > -1:\n pos = buf.raw.find('%s' % pattern, pos + 1)\n if pos > -1:\n found_addresses.append(base_address + pos)\n return found_addresses",
"def find_middlemost(seq, pattern, num_matches=1):\n import regex as re\n matches = [x for x in re.finditer(pattern, seq, overlapped=True)]\n\n if len(matches) < num_matches:\n raise ValueError(\"'{}' found fewer that {} time(s) in '{}'\".format(\n pattern, num_matches, seq))\n\n # Find every index matched by the pattern. If the pattern has multiple \n # capturing groups, iterate to find the one that matches.\n\n indices = []\n for match in matches:\n i, group = -1, 0\n while i < 0:\n group += 1\n i = match.start(group)\n indices.append(i)\n\n # Sort the list of indices by how close they are to the middle of the given \n # sequence.\n\n dist_to_middle = lambda i: abs(i - (len(seq)-1) / 2)\n indices.sort(key=dist_to_middle)\n return [(i, len(seq) - i - 1) for i in indices[:num_matches]]",
"def scan_pattern_page(handle, address, pattern, mask):\n mbi = pymem.memory.virtual_query(handle, address)\n next_region = mbi.BaseAddress + mbi.RegionSize\n allowed_protections = [\n pymem.ressources.structure.MEMORY_PROTECTION.PAGE_EXECUTE_READ,\n pymem.ressources.structure.MEMORY_PROTECTION.PAGE_EXECUTE_READWRITE,\n pymem.ressources.structure.MEMORY_PROTECTION.PAGE_READWRITE,\n pymem.ressources.structure.MEMORY_PROTECTION.PAGE_READONLY,\n ]\n if (mbi.state != pymem.ressources.structure.MEMORY_STATE.MEM_COMMIT or\n mbi.protect not in allowed_protections\n ):\n return next_region, []\n page_bytes = pymem.memory.read_bytes(handle, address, mbi.RegionSize)\n\n found = None\n for offset in range(0, (mbi.RegionSize - len(pattern)), 1):\n partial = page_bytes[offset:offset + len(pattern)]\n for x in range(len(pattern)):\n if mask[x] == '?':\n continue\n if mask[x] == 'x' and not partial[x] == pattern[x]:\n break\n else:\n found = address + offset\n del page_bytes\n return None, found\n return next_region, found",
"def find_exacte_matching(pattern, string):\n\n location_indexes = []\n\n for i in range(len(string) - len(pattern)+1):\n match = True\n for n in range(len(pattern)):\n if string[i+n] != pattern[n]:\n match = False\n break\n if match:\n location_indexes.append(i)\n return location_indexes",
"def findPosition(self,i): # TEST\n return self.abstract.findPosition(self.notes[i])",
"def find_all_indexes(text, pattern):\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # TODO: Implement find_all_indexes here (iteratively and/or recursively)\n\n all_indices = [] # create empty list to append all indices that given pattern appears at\n\n if pattern == \"\":\n for i in range(len(text)):\n all_indices.append(i)\n return all_indices\n\n for i in range(len(text) - len(pattern) + 1): \n for j in range(len(pattern)): \n if pattern[j] != text[i + j]: # text[i + j] --> index i increases by the value of j so if j is 0 then i(2) + j(0) = i(2), if j is 1 then i(2) + j(1) = i(3)\n break\n else: # else, append the index into the list\n all_indices.append(i)\n return all_indices\n\n\n # Not passing the test case for an empty string\n # if pattern == '':",
"def find_pos(self):\n self.y = 0\n for d in self.data:\n try:\n self.x = d.index('m')\n return\n except ValueError:\n self.y += 1",
"def findPat(seq, pat):\n seq = seq.upper()\n pat = pat.upper()\n for i in range(0, len(seq)-len(pat)+1):\n #print \"new pos\", i, seq[i:i+len(pat)],\"<br>\"\n found = True\n for x in range(0, len(pat)):\n #print \"new step\", x, \"<br>\"\n if pat[x]==\"N\":\n #print \"N\",\"<br>\"\n continue\n seqPos = i+x\n if seqPos == len(seq):\n found = False\n break\n if not matchNuc(pat[x], seq[seqPos]):\n #if not patMatch(seq[seqPos], pat[x]):\n #print i, x, pat[x], seq[seqPos], \"no match<br>\"\n found = False\n break\n #print \"match\", i, x, found, \"<br>\"\n if found:\n #print \"yielding\", i, \"<br>\"\n yield i",
"def findCodeMultiple(self, signature, expectedCount=None):\n sig = Signature(signature)\n addrs = [self.entryPoint.add(o + (self.relocSegment << 4)\n - self.entryPoint.linear)\n for o in sig.find(self.image._data)]\n if expectedCount is not None and len(addrs) != expectedCount:\n raise SignatureMatchError(\"Signature found %d times, expected to \"\n \"find %d. Matches: %r\" %\n (len(addrs), expectedCount, addrs))\n log(\"Found patch location %r in %s for: %r\" % (\n addrs, self.basename, sig.shortText))\n return addrs",
"def get_for_pattern(self, pattern):",
"def findCCMatches(self,seg,sg,thr):\n from skimage.feature import match_template\n\n # seg and sg have the same $y$ size, so the result of match_template is 1D\n #m = match_template(sg,seg)\n matches = np.squeeze(match_template(sg, seg))\n\n import peakutils\n md = np.shape(seg)[0]/2\n threshold = thr*np.max(matches)\n indices = peakutils.indexes(matches, thres=threshold, min_dist=md)\n return indices",
"def find_str_all(pat, txt, overlap=True):\n # TODO: // modify the following so it finds all the elements\n all_matches = []\n i = 0\n while i < len(txt) - len(pat) + 1:\n found = True\n for c, c_to_find in zip(txt[i:], pat):\n if c != c_to_find:\n found = False\n break\n if found:\n all_matches.append(i)\n if not overlap:\n i += len(pat)\n else:\n i += 1\n else:\n i += 1\n\n if len(all_matches) == 0:\n return -1\n return all_matches",
"def find_start(maze):\n for line in maze:\n for char in line:\n if char.value == \"P\":\n startpos = (char.x, char.y)\n\n if (debug == 1):\n print(startpos)\n \n return startpos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse command line and return a socket address.
|
def parse_command_line(description):
parser = argparse.ArgumentParser(description=description)
parser.add_argument('host', help='IP or hostname')
parser.add_argument('-p', metavar='port', type=int, default=1060,
help='TCP port (default 1060)')
args = parser.parse_args()
address = (args.host, args.p)
return address
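# Usage sketch (illustrative): invoked as
#     python server.py 127.0.0.1 -p 2000
# this returns ('127.0.0.1', 2000); leaving out -p falls back to port 1060.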
|
[
"def parse_address(addr):\n if ':' in addr:\n try:\n host, port = addr.split(':')\n except ValueError:\n raise ValueError('Invalid address: %s' % addr)\n else:\n host, port = 'localhost', addr\n if host == '*':\n host = '' # any\n try:\n return (host, int(port))\n except ValueError:\n raise ValueError('Invalid address: %s' % addr)",
"def bind_address(self):\n return \"\"\"--bind-address=ip_address\"\"\"",
"def parseArgs():\n if len(sys.argv) < 2:\n print 'Usage:\\n%s UDP_PORT [KNOWN_NODE_IP KNOWN_NODE_PORT]' % sys.argv[0]\n print 'or:\\n%s UDP_PORT [FILE_WITH_KNOWN_NODES]' % sys.argv[0]\n print '\\nIf a file is specified, it should containg one IP address and UDP port\\nper line, seperated by a space.'\n sys.exit(1)\n try:\n int(sys.argv[1])\n except ValueError:\n print '\\nUDP_PORT must be an integer value.\\n'\n print 'Usage:\\n%s UDP_PORT [KNOWN_NODE_IP KNOWN_NODE_PORT]' % sys.argv[0]\n print 'or:\\n%s UDP_PORT [FILE_WITH_KNOWN_NODES]' % sys.argv[0]\n print '\\nIf a file is specified, it should contain one IP address and UDP port\\nper line, seperated by a space.'\n sys.exit(1)\n if len(sys.argv) == 4:\n knownNodes = [(sys.argv[2], int(sys.argv[3]))]\n elif len(sys.argv) == 3:\n knownNodes = []\n f = open(sys.argv[2], 'r')\n lines = f.readlines()\n f.close()\n for line in lines:\n ipAddress, udpPort = line.split()\n knownNodes.append((ipAddress, int(udpPort)))\n else:\n knownNodes = None\n return knownNodes",
"def get_addr(self):\n return self._ip + ':' + str(self._port)",
"def parse_args():\n parser = argparse.ArgumentParser(description='Parse Client args.')\n parser.add_argument('-p', '--port', type=int, default=8080,\n help='Set the port to talk to')\n parser.add_argument('-m', '--message', type=str,\n help='Message to send')\n return parser.parse_args()",
"def test_host():\n parser = create_parser()\n parsed_arguments = parser.parse_args([\"--host\", \"test\"])\n assert parsed_arguments.host == \"test\", \"Wrong host\"",
"def get_addr(host, port):\n if \":\" in host: # IPv6\n return \"[%s]:%s\" % (host, port)\n else: # IPv4\n return \"%s:%s\" % (host, port)",
"def dig_get_host_name():\n dig_args = [\"+short\", \"myip.opendns.com\", \"@resolver1.opendns.com\"]\n addr = dig(dig_args)\n\n # i've had cases of cmd running w/o error, but\n # returning blank str\n if not addr:\n raise ValueError\n\n return addr",
"def get_bind_addr(conf, default_port=None):\r\n return (conf.bind_host, conf.bind_port or default_port)",
"def get_socket(self, addr):\n return self.connections[addr].sock",
"def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]:\n try:\n raw_host, _, raw_port = address.rpartition(\":\")\n\n port = int(raw_port)\n\n if port > 65535 or port < 1:\n raise ValueError(\"Port number is invalid.\")\n\n try:\n host = raw_host.translate({ord(i): None for i in \"[]\"})\n version = ip_address(host).version == IPV6\n except ValueError:\n host = raw_host\n version = None\n\n return host, port, version\n\n except ValueError:\n return None",
"def test_default_host():\n parser = create_parser()\n parsed_arguments = parser.parse_args([])\n assert parsed_arguments.host == \"127.0.0.1\", \"Wrong host\"",
"def addr(self):\n return (self.ip, self.port)",
"def socket_path(self):\n return self._shell._socket_path",
"def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n s.close()\n return port\n except Exception as e:\n pass\n return -1",
"def get_ccnet_server_addr_port():\n return seaserv.CCNET_SERVER_ADDR, seaserv.CCNET_SERVER_PORT",
"def parse_address(addr, strict=False):\n if not isinstance(addr, six.string_types):\n raise TypeError(\"expected str, got %r\" % addr.__class__.__name__)\n scheme, sep, loc = addr.rpartition(\"://\")\n if strict and not sep:\n msg = (\n \"Invalid url scheme. \"\n \"Must include protocol like tcp://localhost:8000. \"\n \"Got %s\" % addr\n )\n raise ValueError(msg)\n if not sep:\n scheme = DEFAULT_SCHEME\n return scheme, loc",
"def get_device_hostname_cli():\n hostname_pattern = re.compile(\"\\nhostname (.*)\\n\")\n hostname_config = cli.cli(\"show run | inc hostname\")\n hostname = hostname_pattern.match(hostname_config).group(1)\n return hostname",
"def get_server_addr(self):\n raise NotImplementedError",
"def lookup(cls, address: str):\n\n host, port = parse_address(address)\n if port is None:\n port = 25565\n try:\n answers = dns.resolver.resolve(\"_minecraft._tcp.\" + host, \"SRV\")\n if len(answers):\n answer = answers[0]\n host = str(answer.target).rstrip(\".\")\n port = int(answer.port)\n except Exception:\n pass\n\n return cls(host, port)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converse with a client over `sock` until they are done talking.
|
def handle_conversation(sock, address):
try:
while True:
handle_request(sock)
except EOFError:
print('Client socket to {} has closed'.format(address))
except Exception as e:
print('Client {} error: {}'.format(address, e))
finally:
sock.close()
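# Minimal accept-loop sketch (assumption: not part of this record) showing how
# handle_conversation() is typically driven from a bound, listening socket:
def serve_forever(listener):
    while True:
        sock, address = listener.accept()    # block until a client connects
        print('Accepted connection from {}'.format(address))
        handle_conversation(sock, address)   # returns once this client is done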
|
[
"def forward(self, client_sock, server_sock):\r\n \r\n # Once we're here, we are not supposed to \"speak\" with the client\r\n # anymore. So any error means for us to close the connection.\r\n print thread.get_ident(), 'Forwarding.'\r\n # These are not used to anything significant now, but I keep them in\r\n # case I would want to do some statistics/logging.\r\n octets_in, octets_out = 0, 0\r\n try:\r\n try:\r\n # Here are the sockets we will be listening.\r\n sockslist = [client_sock, server_sock]\r\n while 1:\r\n # Let us listen...\r\n readables, writeables, exceptions = select.select(\r\n sockslist, [], [],\r\n self.server.Options['inactivity_timeout'])\r\n # If the \"exceptions\" list is not empty or if we are here\r\n # because of the timer (i.e. all lists are empty), then\r\n # we must must bail out, we have finished our work.\r\n if (exceptions\r\n or (readables, writeables, exceptions) == ([], [], [])):\r\n raise Connection_Closed\r\n\r\n # Only a precaution. \r\n data = ''\r\n\r\n # Just in case we would be in the improbable case of data\r\n # awaiting to be read on both sockets, we treat the\r\n # \"readables\" list as if it oculd contain more than one\r\n # element. Thus the \"for\" loop...\r\n for readable_sock in readables:\r\n # We know the socket we want to read of, but we still\r\n # must find what is the other socket. This method\r\n # builds a list containing one element.\r\n writeableslist = [client_sock, server_sock]\r\n writeableslist.remove(readable_sock)\r\n\r\n # We read one chunk of data and then send it to the\r\n # other socket\r\n data = readable_sock.recv(\r\n self.server.Options['data_buf_size'])\r\n # We must handle the case where data=='' because of a\r\n # bug: we sometimes end with an half-closed socket,\r\n # i.e. a socket closed by the peer, on which one can\r\n # always read, but where there is no data to read.\r\n # This must be detected or it would lead to an infinite\r\n # loop.\r\n if data:\r\n writeableslist[0].send(data)\r\n # This is only for future logging/stats.\r\n if readable_sock == client_sock:\r\n octets_out += len(data)\r\n else:\r\n octets_in += len(data)\r\n else:\r\n # The sock is readable but nothing can be read.\r\n # This means a poorly detected connection close.\r\n raise Connection_Closed\r\n # If one peer closes its conenction, we have finished our work.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Connection_Closed\r\n raise\r\n finally:\r\n print thread.get_ident(), octets_in, 'octets in and', octets_out, 'octets out. Connection closed.'",
"def listen_for_client(client_sock):\n global separator_token\n global client_sockets\n\n while True:\n try:\n # keep listening for a message from `client_sock` socket\n msg = client_sock.recv(MESSAGE_SIZE).decode()\n except Exception as e:\n # client no longer connected\n # remove it from the set\n print(f\"[!] Error: {e}\")\n\n print(f\"Remove a socket\")\n client_sockets.remove(client_sock)\n break\n\n # とりあえずエコー\n msg = f\"Echo: {msg}\"\n client_sock.send(msg.encode())",
"def recv(self):\n \n ls = [self._sck_listen] + list(self._scks.keys())\n rr, wr, er = select.select(ls, [], ls)\n \n for r in er:\n if r == self._sck_listen:\n print(\"error in the bound socket. quitting.\")\n exit(0)\n print(\"error in socket {0} with id {1}.\".format(\n r, self._scks[r]['session_id']\n ))\n del self._scks[r]\n\n for r in rr:\n if r == self._sck_listen:\n # New client.\n client, addr = r.accept()\n self._scks[client] = dict([\n ('buffer', '')\n , ('pkt-length', 0)\n , ('session_id', -1)\n ])\n # TODO: Do we want to return something here?\n print(\"client connected.\")\n continue\n\n print(\"clients: {0}\".format(\", \".join([ str(s) for s in self._scks.keys() ])))\n\n client_data = self._scks[r]\n try:\n tmp = r.recv(1024)\n except socket.error as e:\n print(\"client socket error: {0}\".format(str(e)))\n del self._scks[r]\n continue\n if tmp == '':\n print(\"client disconnected.\")\n session_id = self._scks[r]['session_id']\n if len([ x for x in self._scks.values() \\\n if x['session_id'] == session_id ]) < 2:\n self._db.del_client(session_id)\n del self._scks[r]\n continue\n client_data['buffer'] += tmp\n\n #print(\"data from client {0}: \\\"{1}\\\".\".format(r, tmp))\n print(\"begin check.\")\n if client_data['pkt-length'] == 0:\n if len(client_data['buffer']) >= 4:\n # Packet length.\n print(\"read packet length.\")\n client_data['pkt-length'] = struct.unpack('<I'\n , client_data['buffer'][:4])[0]\n client_data['buffer'] = client_data['buffer'][4:]\n else:\n print(\"not enough bytes for packet length.\")\n # Not enough bytes for a packet length.\n continue\n if len(client_data['buffer']) < client_data['pkt-length']:\n # Not enough bytes for a packet.\n print(\"packet length known ({0}), not enough bytes for packet.\".format(client_data['pkt-length']))\n continue\n\n # Alright, we have a packet. Take it from the buffer.\n length = client_data['pkt-length']\n packet = client_data['buffer'][:length]\n client_data['buffer'] = client_data['buffer'][length:]\n client_data['pkt-length'] = 0\n\n self._last_session_id = client_data['session_id']\n self._last_socket = r\n\n return (client_data[\"session_id\"], packet)\n\n # Okey, we didn't find any this round.\n return self.recv()",
"def server_loop(self):\n \n self.sock.listen(1)\n\n #Wait for connection from client\n while(True):\n\n self.logger.info(\"Waiting for client to connect...\")\n\n connection, client_address = self.sock.accept()\n data = \"\"\n\n self.logger.info(\"Waiting for client at %s port %s\" % client_address)\n try:\n ## The recv and sendall methods are dynamically bound\n ## to the socket object, so pylint complains about them\n ## not existing. E1101 is disabled for these lines\n length = int(connection.recv(5)) #pylint: disable=E1101\n self.logger.info(\"Receiving %d bytes\" % length)\n data = connection.recv(length) #pylint: disable=E1101\n returndata = self.handle_message(data)\n if (returndata is not None):\n\n self.logger.info(\"Sending %s\" % returndata)\n\n length = len(returndata)\n returndata = \"%5s%s\" % (length, returndata)\n\n connection.sendall(returndata) #pylint: disable=E1101\n finally:\n connection.close()",
"def service_connection(self, sock, mask):\n addr = sock.getpeername()\n conn = self.connections[addr]\n if mask & selectors.EVENT_READ:\n try:\n recv_data = sock.recv(self.RECV_BUFFER)\n if recv_data:\n yield from conn.handle_data(recv_data)\n else: # connection closed by peer\n log.debug('closing connection to %s', str(addr))\n self.close_connection(addr)\n except (ConnectionResetError, ConnectionAbortedError):\n log.info('Connection reset/aborted: %s', addr)\n self.close_connection(addr)\n if mask & selectors.EVENT_WRITE:\n try:\n conn.flush_out_buffer()\n except OSError as e:\n log.info(\"Error sending to peer: %s\", e)",
"def runLT(self, sock):\n # just send entire message without check for completeness\n self.recvFinshed = False\n sentinal_waiter = threading.Thread(target=self.listenForRecvToFinishThread)\n sentinal_waiter.setDaemon(True)\n sentinal_waiter.start()\n while (not self.recvFinshed):\n # send message to receiver at IP, PORT\n if (self.noise < random.random()):\n self.packetsSent += 1\n # send message to receiver at IP, PORT\n sock.sendto(pickle.dumps(next(self.message_generator)), (self.recv_ip, self.recv_port))\n sock.close()\n sentinal_waiter.join()",
"def play_round(socket, is_first_round):\n is_over = run_single_round(socket,is_first_round)\n if(is_over):\n remove_client(socket)",
"def accept_connections(self):\n while True:\n client_sock, addr = self.connection.accept()\n print(f\"{addr} conectado\")\n\n self.clients.append(client_sock)\n client_thread = threading.Thread(target=self.client_handler, args=(client_sock,))\n client_thread.start()",
"def handle_request(sock):\n aphorism = recv_until(sock, b'?')\n answer = get_answer(aphorism)\n sock.sendall(answer)",
"def __handle_peer(self, client_sock):\n\n self.__debug('New child ' + str(threading.currentThread().getName()))\n self.__debug('Connected ' + str(client_sock.getpeername()))\n\n host, port = client_sock.getpeername()\n conn = Peer(port, client_sock)\n while True:\n try:\n msg_type, msg_data = conn.recv_data()\n if msg_type:\n msg_type = msg_type.upper()\n if msg_type not in self.handlers:\n self.__debug('Peer msg not handled') # : %s: %s' % (msg_type, msg_data))\n break\n else:\n self.__debug('Handling peer msg') # : %s: %s' % (msg_type, msg_data))\n disconnect = self.handlers[msg_type](conn, msg_data)\n if disconnect:\n break\n except KeyboardInterrupt:\n raise\n except:\n traceback.print_exc()\n\n self.__debug('Disconnecting ' + str(client_sock.getpeername()))\n conn.close()",
"def handle_client(self, client, addr):\n ip, port = addr\n port = str(port)\n while True:\n try:\n msg = client.recv(1024).decode()\n except:\n return\n if msg == \"connect\":\n # initial message for when a client attempts to connect to server\n continue\n if msg == \"{quit}\":\n self.close_connection(client, (ip, port))\n print(\"%s:%s terminated the connection\"%(ip, port))\n return\n print(f\"\\nMessage receieved from: {ip}\\nSender's Port: {port}\\nMessage: {msg}\")",
"def await_clients(self):\n\n while self.waiting:\n try:\n addr, packets = self.wait_for_packet(.5, Headers.NEW_GAME,\n Headers.IS_HOSTING,\n Headers.LEAVE_GAME)\n for packet in packets:\n if packet.startswith(Headers.NEW_GAME):\n name = packet.split('-')[1]\n if addr not in self.clients:\n print(\"Client joined with name \" + name)\n self.clients[addr] = name\n self.socket.sendto(packet.encode(), addr)\n elif packet.startswith(Headers.IS_HOSTING):\n self.socket.sendto(packet.encode(), addr)\n elif packet.startswith(Headers.LEAVE_GAME):\n print(\"Player with name {} has left the game\".format(self.clients[addr]))\n self.clients.pop(addr, None)\n except timeout: # used to check periodically if still waiting\n pass",
"def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()",
"def send_recv_loop(conn):\n while True:\n message = conn.recv(256).decode()\n if message == \"\":\n raise EmptyMessageException(\"Message from server empty. Something went wrong.\")\n final = parse_message(conn, message)\n if final:\n break",
"def receive_data(self):\n while 1:\n client, address = self.sock.accept()\n print('Client connection recieved from:', address[0])\n data = client.recv(self.buffer_size)\n if data:\n print(' Response recieved:', data.decode())\n client.send(data)\n client.close()",
"def runTCP(self, sock):\n # connect to receiever, tls handshake\n sock.connect((self.recv_ip, self.recv_port))\n # continue to send massage until...\n\n for block in self.blocks:\n self.packetsSent += 1\n if (self.noise < random.random()):\n # send message to receiver at IP, PORT\n print((block))\n # print(pickle.loads(pickle.dumps(block)))\n sock.sendall(pickle.dumps(block))\n for _ in range(10): # send constant number of sentinals\n sock.sendto(pickle.dumps(None), (self.recv_ip, self.recv_port))",
"def wait_for_client(self):\n print('Waiting for client')\n self.listener == None\n while self.listener == None:\n try:\n self.listener = mpc.Listener((self.address, self.port), authkey=self.authkey)\n self.remote_conn = self.listener.accept()\n print('Connection accepted from:' + self.listener.last_accepted[0] + ':%d' %(self.listener.last_accepted[1]))\n except mp.AuthenticationError:\n print('Client had wrong key')",
"def socket_thread(s):\n global process_end\n try:\n conn, addr = s.accept()\n conn.recv(1024)\n except Exception as e:\n print(e)\n # Notify the main thread to end process.\n process_end = True",
"def read_one_line(sock):\r\n newline_received = False\r\n message = \"\"\r\n while not newline_received:\r\n character = sock.recv(1).decode()\r\n if character == '\\n':\r\n newline_received = True\r\n elif character == '\\r':\r\n pass\r\n else:\r\n message += character\r\n return message",
"def listen_for_client(self, cs):\n while True:\n try:\n msg = cs.recv(1024).decode()\n except Exception as e:\n print(f\"[!] Error: {e}\")\n self.client_sockets.remove(cs)\n else:\n if self.separator_token in msg:\n msg = msg.split(self.separator_token)\n if msg[0] == \"ohad\" and int(msg[1]) == 123:\n pass\n elif msg == \"get devices\":\n self.operation_socket.send(msg.encode())\n else:\n print(msg)\n\n # self.open_devices_server(cs)\n # # # iterate over all connected sockets\n # for client_socket in client_sockets:\n # # and send the message\n # client_socket.send(msg.encode())\n #"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Receive a single client request on `sock` and send the answer.
|
def handle_request(sock):
aphorism = recv_until(sock, b'?')
answer = get_answer(aphorism)
sock.sendall(answer)
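# Note: recv_until() is defined in the following record; get_answer() is not
# shown anywhere here. A hypothetical stand-in, for local testing only:
def get_answer(aphorism):
    answers = {b'Beautiful is better than?': b' Ugly.'}   # assumed mapping
    return answers.get(aphorism, b'Error: unknown aphorism.')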
|
[
"def read_one_line(sock):\r\n newline_received = False\r\n message = \"\"\r\n while not newline_received:\r\n character = sock.recv(1).decode()\r\n if character == '\\n':\r\n newline_received = True\r\n elif character == '\\r':\r\n pass\r\n else:\r\n message += character\r\n return message",
"def receive_data(self):\n while 1:\n client, address = self.sock.accept()\n print('Client connection recieved from:', address[0])\n data = client.recv(self.buffer_size)\n if data:\n print(' Response recieved:', data.decode())\n client.send(data)\n client.close()",
"def recv(self):\n \n ls = [self._sck_listen] + list(self._scks.keys())\n rr, wr, er = select.select(ls, [], ls)\n \n for r in er:\n if r == self._sck_listen:\n print(\"error in the bound socket. quitting.\")\n exit(0)\n print(\"error in socket {0} with id {1}.\".format(\n r, self._scks[r]['session_id']\n ))\n del self._scks[r]\n\n for r in rr:\n if r == self._sck_listen:\n # New client.\n client, addr = r.accept()\n self._scks[client] = dict([\n ('buffer', '')\n , ('pkt-length', 0)\n , ('session_id', -1)\n ])\n # TODO: Do we want to return something here?\n print(\"client connected.\")\n continue\n\n print(\"clients: {0}\".format(\", \".join([ str(s) for s in self._scks.keys() ])))\n\n client_data = self._scks[r]\n try:\n tmp = r.recv(1024)\n except socket.error as e:\n print(\"client socket error: {0}\".format(str(e)))\n del self._scks[r]\n continue\n if tmp == '':\n print(\"client disconnected.\")\n session_id = self._scks[r]['session_id']\n if len([ x for x in self._scks.values() \\\n if x['session_id'] == session_id ]) < 2:\n self._db.del_client(session_id)\n del self._scks[r]\n continue\n client_data['buffer'] += tmp\n\n #print(\"data from client {0}: \\\"{1}\\\".\".format(r, tmp))\n print(\"begin check.\")\n if client_data['pkt-length'] == 0:\n if len(client_data['buffer']) >= 4:\n # Packet length.\n print(\"read packet length.\")\n client_data['pkt-length'] = struct.unpack('<I'\n , client_data['buffer'][:4])[0]\n client_data['buffer'] = client_data['buffer'][4:]\n else:\n print(\"not enough bytes for packet length.\")\n # Not enough bytes for a packet length.\n continue\n if len(client_data['buffer']) < client_data['pkt-length']:\n # Not enough bytes for a packet.\n print(\"packet length known ({0}), not enough bytes for packet.\".format(client_data['pkt-length']))\n continue\n\n # Alright, we have a packet. Take it from the buffer.\n length = client_data['pkt-length']\n packet = client_data['buffer'][:length]\n client_data['buffer'] = client_data['buffer'][length:]\n client_data['pkt-length'] = 0\n\n self._last_session_id = client_data['session_id']\n self._last_socket = r\n\n return (client_data[\"session_id\"], packet)\n\n # Okey, we didn't find any this round.\n return self.recv()",
"def handle_single_client(client_socket):\r\n request = \"$$$\"\r\n while request != '' and request != 'QUIT':\r\n try:\r\n # receiving data\r\n request, params = receive_client_request(client_socket)\r\n valid = check_client_request(request, params) # CHECKING IF THE REQUEST IS VALID\r\n if valid:\r\n response = handle_client_request(request, params, client_socket) # CHECKING THE RESPONS ACOORDING TO PROTOCOL\r\n send_response_to_client(response, client_socket) # SENDS THE RESPONSE TO CLIENT\r\n else:\r\n send_response_to_client(\"illgal command\", client_socket)\r\n except Exception as e:\r\n print e\r\n return False\r\n if request == \"QUIT\":\r\n return True",
"def recieve(sock):\r\n\r\n try:\r\n data = sock.recv(1024)\r\n #print(data) unlock if you want to see encrypted data raw\r\n decrypted_data = DEScrypt(data, 2, session_key)\r\n data_str = decrypted_data.decode(\"utf-8\")\r\n return \"Server: \" + data_str\r\n except:\r\n print(\"Session ended with gary the chatbot\")\r\n sys.exit(0)",
"def listen_for_client(client_sock):\n global separator_token\n global client_sockets\n\n while True:\n try:\n # keep listening for a message from `client_sock` socket\n msg = client_sock.recv(MESSAGE_SIZE).decode()\n except Exception as e:\n # client no longer connected\n # remove it from the set\n print(f\"[!] Error: {e}\")\n\n print(f\"Remove a socket\")\n client_sockets.remove(client_sock)\n break\n\n # とりあえずエコー\n msg = f\"Echo: {msg}\"\n client_sock.send(msg.encode())",
"def serve_one_request(self,timeout=None):\r\n req = self.connection.recv(timeout=timeout)\r\n if req is not None:\r\n self.handle_request(req)\r\n return req",
"def handle_client(self, client, addr):\n ip, port = addr\n port = str(port)\n while True:\n try:\n msg = client.recv(1024).decode()\n except:\n return\n if msg == \"connect\":\n # initial message for when a client attempts to connect to server\n continue\n if msg == \"{quit}\":\n self.close_connection(client, (ip, port))\n print(\"%s:%s terminated the connection\"%(ip, port))\n return\n print(f\"\\nMessage receieved from: {ip}\\nSender's Port: {port}\\nMessage: {msg}\")",
"def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()",
"def get_result(self):\n if not self.conn.sock: return\n return self.conn.getresponse()",
"def recv(sock):\n status, message = recv_message(sock)\n\n args = message.split(':')\n return status, args",
"def forward(self, client_sock, server_sock):\r\n \r\n # Once we're here, we are not supposed to \"speak\" with the client\r\n # anymore. So any error means for us to close the connection.\r\n print thread.get_ident(), 'Forwarding.'\r\n # These are not used to anything significant now, but I keep them in\r\n # case I would want to do some statistics/logging.\r\n octets_in, octets_out = 0, 0\r\n try:\r\n try:\r\n # Here are the sockets we will be listening.\r\n sockslist = [client_sock, server_sock]\r\n while 1:\r\n # Let us listen...\r\n readables, writeables, exceptions = select.select(\r\n sockslist, [], [],\r\n self.server.Options['inactivity_timeout'])\r\n # If the \"exceptions\" list is not empty or if we are here\r\n # because of the timer (i.e. all lists are empty), then\r\n # we must must bail out, we have finished our work.\r\n if (exceptions\r\n or (readables, writeables, exceptions) == ([], [], [])):\r\n raise Connection_Closed\r\n\r\n # Only a precaution. \r\n data = ''\r\n\r\n # Just in case we would be in the improbable case of data\r\n # awaiting to be read on both sockets, we treat the\r\n # \"readables\" list as if it oculd contain more than one\r\n # element. Thus the \"for\" loop...\r\n for readable_sock in readables:\r\n # We know the socket we want to read of, but we still\r\n # must find what is the other socket. This method\r\n # builds a list containing one element.\r\n writeableslist = [client_sock, server_sock]\r\n writeableslist.remove(readable_sock)\r\n\r\n # We read one chunk of data and then send it to the\r\n # other socket\r\n data = readable_sock.recv(\r\n self.server.Options['data_buf_size'])\r\n # We must handle the case where data=='' because of a\r\n # bug: we sometimes end with an half-closed socket,\r\n # i.e. a socket closed by the peer, on which one can\r\n # always read, but where there is no data to read.\r\n # This must be detected or it would lead to an infinite\r\n # loop.\r\n if data:\r\n writeableslist[0].send(data)\r\n # This is only for future logging/stats.\r\n if readable_sock == client_sock:\r\n octets_out += len(data)\r\n else:\r\n octets_in += len(data)\r\n else:\r\n # The sock is readable but nothing can be read.\r\n # This means a poorly detected connection close.\r\n raise Connection_Closed\r\n # If one peer closes its conenction, we have finished our work.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Connection_Closed\r\n raise\r\n finally:\r\n print thread.get_ident(), octets_in, 'octets in and', octets_out, 'octets out. Connection closed.'",
"def receive_msg(self, client_sock):\n try:\n message_header = client_sock.recv(HEADER_LENGTH)\n if not len(message_header):\n return False\n message_length = int(message_header.decode('utf-8'))\n data = client_sock.recv(message_length)\n return {\"header\": message_header, \"data\": data}\n except Exception as e:\n print(str(e))\n return False",
"def respond(client):\n response = input(\"Enter a value: \")\n client.send(bytes(response, 'utf8'))\n client.close()",
"def serveNoBlock(self):\n self.log(\"Searching for client...\")\n self.s.setblocking(0)\n self.conn, self.clientAddr = self.s.accept() #wait for client to query the server for a connection\n self.log('Connected to ' + self.clientAddr[0] + ':' + str(self.clientAddr[1]))\n return None #only connects to one client ",
"def recv(self,timeout=None):\r\n if self.recv_buffer:\r\n msg = self.recv_buffer.popleft()\r\n else:\r\n try:\r\n msg = self._recv(timeout=timeout)\r\n except zmq.ZMQError, e:\r\n if e.errno != zmq.EAGAIN:\r\n if not self._has_shutdown:\r\n raise\r\n if e.errno not in (zmq.ENOTSUP,zmq.EFAULT,):\r\n raise\r\n return None\r\n else:\r\n if msg is None:\r\n return None\r\n # Parse out the request object and return.\r\n (server_id,client_id,rest) = msg.split(' ', 2)\r\n client = self.ClientClass(self,server_id,client_id)\r\n return self.RequestClass.parse(client,rest)",
"def read_request(self):\n data = b''\n while data.find(b'\\r\\n\\r\\n') == -1:\n r = self.conn.recv(1024)\n # r is empty if socket is closed\n if not r:\n logging.error(\"socket is closed\")\n break\n data += r\n try:\n self.request_line = data.splitlines()[0]\n except Exception as e:\n logging.error(\"recieved data:{0}\".format(data))\n raise e",
"def readline(sock):\n buffer_ = sock.recv(4096)\n done = False\n while not done:\n if \"\\n\" in buffer_:\n (line, buffer_) = buffer_.split(\"\\n\", 1)\n yield line\n else:\n more = sock.recv(4096)\n if not more:\n done = True\n else:\n buffer_ = buffer_ + more\n if buffer_:\n yield buffer_",
"def socket_recv(self):\n recv = 0\n try:\n data = self.sock.recv(self.BLOCKSIZE_RECV)\n recv = len(data)\n if 0 == recv:\n raise Disconnected('Closed by client')\n except socket.error as err:\n raise Disconnected('socket errno %d: %s' % (err[0], err[1],))\n self.bytes_received += recv\n self.last_input_time = time.time()\n\n ## Test for telnet commands, non-telnet bytes\n ## are pushed to self.recv_buffer (side-effect),\n for byte in data:\n self._iac_sniffer(byte)\n return recv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Receive bytes over socket `sock` until we receive the `suffix`.
|
def recv_until(sock, suffix):
message = sock.recv(4096)
if not message:
raise EOFError('socket closed')
while not message.endswith(suffix):
data = sock.recv(4096)
if not data:
raise IOError('received {!r} then socket closed'.format(message))
message += data
return message
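# Note: 4096 bytes is an arbitrary per-call read size; the loop is required
# because a TCP recv() may return fewer bytes than were sent, so a single call
# is not guaranteed to contain the terminating suffix.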
|
[
"def recv_until(sock, suffix):\n message = sock.recv(4096) # arbitrary value of 4KB\n if not message:\n raise EOFError('socket closed')\n while not message.endswith(suffix):\n data = sock.recv(4096)\n if not data:\n raise IOError('received {!r} then socket closed'.format(message))\n message += data\n return message",
"def readexactly(sock, numbytes):\n bytes_received = b\"\"\n count = 0\n while count < numbytes:\n byte = sock.recv(1)\n if byte:\n count += 1\n bytes_received += byte\n else:\n raise asyncio.streams.IncompleteReadError(bytes_received, numbytes-count)\n\n return bytes_received",
"def recvuntil(self, needle: bytes, timeout: Optional[float] = None) -> bytes:\n\n if timeout is None:\n timeout = 30\n\n data = b\"\"\n time_end = time.time() + timeout\n\n # We read one byte at a time so we don't overshoot the goal\n while not data.endswith(needle):\n\n # Check if we have timed out\n if time.time() >= time_end:\n raise ChannelTimeout(data)\n\n next_byte = self.recv(1)\n\n if next_byte is not None:\n data += next_byte\n\n return data",
"def receive_all(self, sock):\n data = b''\n while True:\n part = sock.recv(self.BUFFER_SIZE)\n data += part\n if len(part) < self.BUFFER_SIZE:\n break\n return data",
"def socket_recv(sock, n):\n data = b''\n while len(data) < n:\n packet = sock.recv(n - len(data))\n if not packet:\n return None\n data += packet\n return data",
"def socket_recv(self):\n recv = 0\n try:\n data = self.sock.recv(self.BLOCKSIZE_RECV)\n recv = len(data)\n if 0 == recv:\n raise Disconnected('Closed by client')\n except socket.error as err:\n raise Disconnected('socket errno %d: %s' % (err[0], err[1],))\n self.bytes_received += recv\n self.last_input_time = time.time()\n\n ## Test for telnet commands, non-telnet bytes\n ## are pushed to self.recv_buffer (side-effect),\n for byte in data:\n self._iac_sniffer(byte)\n return recv",
"def recvPacket(sock):\n file_path = b''\n data = b''\n while (data!=b'$'):\n data = sock.recv(1)\n if (data == b''):\n break\n file_path += data\n file_path = file_path.decode('gbk')[0:-1]\n\n return file_path",
"def receive_until(self, end: bytes) -> bytes:\r\n received_bytes = b''\r\n message = self.lora.recv_message(self._RX_TIMEOUT).message\r\n\r\n while message != end:\r\n received_bytes += message\r\n message = self.lora.recv_message(self._RX_TIMEOUT).message\r\n\r\n return received_bytes",
"def recv(self, count):\n if not self._is_connected:\n raise NotConnectedError()\n try:\n data = self._sock.recv(count)\n except _socket.timeout:\n return \"\"\n except _socket.error, (errno, info):\n if errno in timeout_errnos:\n return \"\"\n else:\n raise SocketError(errno, info)\n if not data:\n raise EOFError()\n return data",
"def handle(key, mask):\n sock = key.fileobj\n data = key.data\n if mask & selectors.EVENT_READ:\n try:\n recv = sock.recv(config['buffer'])\n # We've seen ConnectionResetError and TimeoutError\n except socket.error:\n try:\n sel.unregister(sock)\n sock.close()\n except Exception:\n pass\n return\n else:\n if recv:\n data.buf += recv\n else:\n try:\n sel.unregister(sock)\n sock.close()\n except Exception:\n pass\n return\n while data.buf:\n pos = data.buf.find(b'\\r\\n')\n if pos != -1:\n cmd = data.buf[0:pos]\n data.buf = data.buf[pos + 2:]\n data.client.handle(cmd)\n continue\n buflength = len(data.buf)\n if buflength > 512:\n logger.warning(\n '%s, client message too long: %d',\n sock.getpeername()[0],\n buflength,\n )\n if buflength >= config['buffer'] and buflength > 512:\n logger.error(\n '%s, protocol mismatch, terminating',\n sock.getpeername()[0],\n )\n data.client.do_error('Uh, this seems really wrong. Buh-bye!')\n sel.unregister(sock)\n sock.close()\n break",
"def test_02_read_server_stop_period(self):\n self.fake_sfile.set_reply_buf('line 1\\nl2\\nl3\\n.\\r\\nl4\\r\\n')\n self.assertEquals(self.conn._read_server(False)[-1], 'l3')",
"def _recvbytes(self, bytes_needed, sock_buf = None):\r\n\tif sock_buf is None:\r\n\t\tsock_buf = StringIO()\r\n\tbytes_count = 0\r\n\twhile bytes_count < bytes_needed:\r\n\t\tchunk = self.recv(min(bytes_needed - bytes_count, 32768))\r\n\t\tpart_count = len(chunk)\r\n\r\n\t\tif part_count < 1:\r\n\t\t\treturn None\r\n\r\n\t\tbytes_count += part_count\r\n\t\tsock_buf.write(chunk)\r\n\t\r\n\treturn sock_buf",
"def recv_nbytes(sock, n):\n #print n, \"this is \"\n bytes_received = 0\n received = \"\"\n # keep on reading until we get what we expected\n while bytes_received < n:\n ready_to_read,_,_ = select.select([sock],[],[])\n data = sock.recv(1, socket.MSG_PEEK)\n #rint data, \"this is the data\"\n\n if len(data) == 0:\n raise ClientDead\n else:\n assert(ready_to_read != [])\n new_recv = sock.recv(n - bytes_received)\n bytes_received += len(new_recv)\n received += new_recv\n assert(bytes_received == len(received))\n return received",
"def receive_bytes(self, size):\n time_start = datetime.now()\n total_data = \"\"\n last_read = \"\"\n while True:\n last_read = self.request.recv(size)\n total_data += last_read\n size -= len(last_read)\n if size <= 0:\n break\n else:\n time.sleep(0.01)\n time_now = datetime.now()\n time_diff = time_now - time_start\n if time_diff.seconds >= 5:\n raise DataReadTimeoutException()\n return total_data",
"def receive(self):\n\n buffer = self.__data_buffer\n\n # search for the end socket keyword data\n end_pattern_idx = buffer.find(Socket.SOCK_DATA_END)\n while end_pattern_idx == -1:\n chunk = self.__sock.recv(Socket.SOCKET_BUFFER_SIZE)\n if not chunk:\n raise RuntimeError(\"socket connection broken\")\n buffer += chunk\n end_pattern_idx = buffer.find(Socket.SOCK_DATA_END)\n\n self.__data_buffer = buffer[end_pattern_idx + len(Socket.SOCK_DATA_END):]\n\n return pickle.loads(buffer[:end_pattern_idx])",
"def test_read_until(self):\n want = [b'xxxmatchyyy']\n telnet = test_telnet(want)\n data = telnet.read_until(b'match')\n self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq,\n telnet.rawq, telnet.sock.reads))\n\n reads = [b'x' * 50, b'match', b'y' * 50]\n expect = b''.join(reads[:-1])\n telnet = test_telnet(reads)\n data = telnet.read_until(b'match')\n self.assertEqual(data, expect)",
"def worker():\r\n unprocessed=bytes()\r\n while True:\r\n try:\r\n chunk = self.socket.recv(2048)\r\n if len(chunk)==0: \r\n break\r\n else: \r\n unprocessed+=chunk \r\n result = self._parseData(unprocessed)\r\n #_parse data will return how many bytes was parse or -1 on error\r\n #we trim the pendingData buffer from the left using result as the index\r\n #if result == 0 it means no data was parsed and it will stay in unprocessed buffer until more data has arrived\r\n if result < 0:\r\n sys.stderr.write(\"TcpSocketAdapter._parseData error %d\"%result)\r\n break\r\n elif result > 0:\r\n unprocessed=unprocessed[result:]\r\n except (ConnectionAbortedError, OSError):\r\n break\r\n print(\"socket worker shutting down\")",
"async def receive_until(self, bytes_to_receive: int = 4096, delimiter: bytes = '\\n', timeout: float = 1.0) -> bytes:\n timeout_start = time.time()\n while time.time() < timeout_start + timeout:\n chunk = await self.receive(bytes_to_receive)\n if not chunk:\n break\n if delimiter not in chunk:\n self.buffer.append(chunk)\n continue\n data_list = chunk.split(delimiter)\n self.buffer.append(data_list[0])\n ret = self.buffer.copy()\n self.buffer = [data_list[1]]\n return b''.join(ret)\n\n raise ClientTimeoutError(\"timeout while receiving data\")",
"def wait_for_next_step(self):\n response = Convert.listen(self.socket)\n if len(response) == CHOOSE_LEN:\n return self.start(response)\n elif len(response) == FEED_LEN:\n return self.feed(response)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a complex sequence, such as a cos(), to the signals variable.
|
def addcomplexplot(self, coefficient, frequency, constantPhi):
self.signals.append(coefficient * np.cos(frequency * self.samples + constantPhi))
self.frequencies.append(Fraction(frequency / np.pi))
self.coefficients.append(coefficient)
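# Usage sketch (assumptions: self.samples is an integer index array such as
# np.arange(0, 32), and signals/frequencies/coefficients are plain lists):
#     obj.addcomplexplot(coefficient=1.0, frequency=np.pi / 4, constantPhi=0)
# appends 1.0 * cos((pi/4) * n + 0) to obj.signals and stores the normalized
# frequency (roughly Fraction(1, 4)) and the coefficient alongside it.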
|
[
"def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF___iadd__(self, *args)",
"def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___iadd__(self, *args)",
"def set_complex_coefficients(self, c0, c1, c2):\n self.c0 = c0\n self.c1 = c1\n self.c2 = c2\n self.z = [complex(1/(2 * math.pi * (self.c0 + self.c1 * self.f[i] + self.c2 * self.f[i] ** 2)), 1)\n for i in range(len(self.f))]",
"def __add__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF___add__(self, *args)",
"def complex_exp(angle):\r\n return np.cos(angle) + 1j * np.sin(angle)",
"def __add__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___add__(self, *args)",
"def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexLD___iadd__(self, *args)",
"def grade_algebraic(self):\n\n self.complex_num = complex(self.modul * math.cos(self.arg),\n self.modul * math.sin(self.arg))",
"def set_complex_coefficients(self, l0, l1, l2):\n self.l0 = l0\n self.l1 = l1\n self.l2 = l2\n self.z = [complex(2 * math.pi * (self.l0 + self.l1 * self.f[i] + self.l2 * self.f[i] ** 2), 1) for\n i in range(len(self.f))]",
"def cos(self):\n return Ad_Var(np.cos(self._val), -self._ders*np.sin(self._val))",
"def __iadd__(self, __f: 'float') -> \"stdcomplexF &\":\n return _stdcomplexPython.stdcomplexF___iadd__(self, __f)",
"def __add__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexLD___add__(self, *args)",
"def _f90complex(self, value):\n return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,\n fmt=self.float_format)",
"def __add__(self, other):\n if isinstance(other, (int, float)):\n real = self.real + other\n i, j, k = self.i, self.j, self.k\n elif isinstance(other, complex):\n raise TypeError(\n 'Cannot add a Quaternion to a complex number. Make the '\n + 'complex number a Quaternion before adding.')\n elif isinstance(other, Quaternion):\n real = self.real + other.real\n i = self.i + other.i\n j = self.j + other.j\n k = self.k + other.k\n else:\n return NotImplemented\n\n return Quaternion(real, i, j, k)",
"def split_complex(cplx, sfx=1, sfy=1):\n return sfx*np.real(cplx), sfy*np.imag(cplx)",
"def cosine_series_coefficient(self,n,L):\n from sage.all import cos, pi\n x = var('x')\n result = sum([(2*f(x)*cos(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result",
"def fourier_series_cosine_coefficient(self,n,L):\n from sage.all import cos, pi\n x = var('x')\n result = sum([(f(x)*cos(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result",
"def coadd(spectra):\n raise NotImplementedError",
"def music(x, fc, sd, nsignals=1, complex_output=False):\n if x.ndim == 1:\n x = x[:,_np.newaxis]\n if x.shape[0] != sd.shape[1]:\n raise ValueError('Sensor count mismatch in data and steering vector')\n if fc == 0:\n a = _np.ones_like(sd)/_np.sqrt(sd.shape[1])\n else:\n a = _np.exp(-2j*_np.pi*fc*sd)/_np.sqrt(sd.shape[1])\n if complex_output:\n R = covariance(x)\n if _np.linalg.cond(R) > 10000:\n R += _np.random.normal(0, _np.max(_np.abs(R))/1000000, R.shape)\n A, B = _np.linalg.eigh(R)\n idx = A.argsort()[::-1]\n lmbd = A[idx] # Sorted vector of eigenvalues\n B = B[:, idx] # Eigenvectors rearranged accordingly\n En = B[:, nsignals:len(B)] # Noise eigenvectors\n V = _np.matmul(En,En.conj().T)#En.conj().dot(En)#\n w = _np.array([V.dot(a[j])/(a[j].conj().dot(V).dot(a[j])) for j in range(a.shape[0])])\n return w.conj().dot(x)\n else:\n R = covariance(x)\n if _np.linalg.cond(R) > 10000:\n R += _np.random.normal(0, _np.max(_np.abs(R))/1000000, R.shape)\n A, B = _np.linalg.eigh(R)\n idx = A.argsort()[::-1]\n lmbd = A[idx] # Sorted vector of eigenvalues\n B = B[:, idx] # Eigenvectors rearranged according to eigenvalues\n En = B[:, nsignals:len(B)] # Noise eigenvectors\n V = _np.matmul(En,En.conj().T)\n return _np.array([1.0/a[j].conj().dot(V).dot(a[j]).real for j in range(a.shape[0])])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts the frequency w0 to fraction form, strips pi from the input, and returns the period N of the discrete-time signal in a list. Could also return the constant k if added.
|
def getperiod(self):
periodN = []
for freq in self.frequencies:
w0fraction = Fraction(freq / np.pi)
periodN.append(w0fraction.denominator * 2)
return periodN
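# Worked example (assumption: self.frequencies holds angular frequencies in
# radians, e.g. [np.pi / 4]): freq / np.pi evaluates to 0.25, Fraction(0.25)
# is 1/4 with denominator 4, so the reported period is 4 * 2 == 8 samples,
# matching N = 2*pi / w0 for w0 = pi/4.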
|
[
"def freq(n, dt):\n import numpy as np\n return 1.0*np.arange(n)/n/dt",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def bandlimited_dirac(N_sph, d, w_n=None):\n d = utils.asarray_1d(d)\n if w_n is None:\n w_n = np.ones(N_sph + 1)\n assert(len(w_n) == N_sph + 1), \"Provide weight per order.\"\n g_n = np.zeros([(N_sph + 1)**2, len(d)])\n for n, i in enumerate(range(N_sph + 1)):\n g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \\\n scyspecial.eval_legendre(n, np.cos(d))\n dirac = np.sum(g_n, axis=0)\n return dirac",
"def get_freqs(Fs, n):\n\n return np.linspace(0, Fs / 2, int(n / 2 + 1))",
"def fftfreq(n,d=1.0):\r\n assert isinstance(n, _integer_types)\r\n val = 1.0/(n*d)\r\n results = empty(n, int)\r\n N = (n-1)//2 + 1\r\n p1 = arange(0,N,dtype=int)\r\n results[:N] = p1\r\n p2 = arange(-(n//2),0,dtype=int)\r\n results[N:] = p2\r\n return results * val\r\n #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)\r",
"def giveRange(n):\n return [ (n)**2 *2*np.pi , (n+1)**2 *2*np.pi ]",
"def power_spectrum(t, theta0):\n\n theta = restrict_theta(theta0)\n \n # fill in the rest -- take the FFT of theta and return omega_k and \n # the transform of theta\n N = len(t) \n F = (2.0/N)*np.fft.rfft(theta)\n\n k = np.fft.rfftfreq(N)\n kfreq = 2.0*np.pi*k*N/max(t)\n\n return kfreq, F",
"def solve_period(self):\n \n return 2.*np.pi*self.a**(3./2.)/np.sqrt(const.G.value*(self.m1+self.m2))/(24.*3600.)",
"def get_time_audio_signal(fs, wave):\n return np.arange(wave.size)/float(fs)",
"def periodogram(x, window=None, window_len=7):\n n = len(x)\n I_w = np.abs(fft(x))**2 / n\n w = 2 * np.pi * np.arange(n) / n\n w, I_w = w[:int(n/2)+1], I_w[:int(n/2)+1] # Take only values on [0, pi]\n if window:\n I_w = smooth(I_w, window_len=window_len, window=window)\n return w, I_w",
"def get_sample_per_cycle(rate, freq):\n if freq == 0:\n return 0\n return rate / freq",
"def fftfreq(n, dtype=torch.float, device=torch.device(\"cpu\")):\n return (torch.arange(n, dtype=dtype, device=device) + n // 2) % n - n // 2",
"def wavenumber_to_freq(wavenumber):\n return wavenumber * c('cm/s')",
"def wave_vector(self, frequency):\n k = 2. * np.pi / wavelength(frequency)\n return k",
"def cm12freq(wn):\n c = 299792458*1e6/1e12 # speed of light in vac. [μm/ps]\n return c*1e-4*wn",
"def get_period():\n return 250",
"def fourier_frequencies(self):\n return jnp.reciprocal(self.fourier_periods)",
"def freqs_zpk(z, p, k, worN=200):\n k = np.asarray(k)\n if k.size > 1:\n raise ValueError('k must be a single scalar gain')\n\n if worN is None:\n # For backwards compatibility\n w = findfreqs(z, p, 200, kind='zp')\n elif _is_int_type(worN):\n w = findfreqs(z, p, worN, kind='zp')\n else:\n w = worN\n\n w = atleast_1d(w)\n s = 1j * w\n num = polyvalfromroots(s, z)\n den = polyvalfromroots(s, p)\n h = k * num/den\n return w, h",
"def circadian_rhythm(data):\n fvals = np.fft.fft(data) # compute a periodogram\n avals = np.absolute(fvals)\n avals = avals[1:]\n total = np.sum(avals) # normalize the values\n if total != 0:\n avals = avals / total\n return avals[23] # frequency value for a 24 hour cycle"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|